| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(
        self,
        image: np.ndarray,
        size: int,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 21 |
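A minimal standalone sketch (plain NumPy, with toy shapes chosen for illustration) of the padding rule the processor above applies: each spatial dimension grows to the next multiple of pad_size, filled symmetrically.

import numpy as np

image = np.zeros((3, 21, 30))  # toy channels-first image
size = 8  # pad_size
_, old_height, old_width = image.shape
pad_height = (old_height // size + 1) * size - old_height  # 21 -> 24, pad 3
pad_width = (old_width // size + 1) * size - old_width  # 30 -> 32, pad 2
padded = np.pad(image, ((0, 0), (0, pad_height), (0, pad_width)), mode="symmetric")
assert padded.shape == (3, 24, 32)
# Note the formula always adds at least one row/column: a dimension that is
# already a multiple of `size` is padded by a full extra block.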
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only model.", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 14 | 0 |
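A small illustrative sketch (NumPy only, hypothetical sizes) of the reshape the attention lookup above performs: T5X stores a per-head kernel of shape (d_model, num_heads, d_head), while the PyTorch linear layer wants a flat, transposed (num_heads * d_head, d_model) matrix.

import numpy as np

d_model, num_heads, d_head = 512, 8, 64  # hypothetical sizes
k_tmp = np.random.randn(d_model, num_heads, d_head)
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])  # (512, 512)
weight = k.T  # nn.Linear stores weights as (out_features, in_features)
assert weight.shape == (num_heads * d_head, d_model)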
'''simple docstring'''
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 22 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Computes one generation of Conway's Game of Life on the given canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 14 | 0 |
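As a quick sanity check of the rules above, a period-2 "blinker" should flip between a horizontal and a vertical bar; this reuses run from the script above.

blinker = [[False] * 5 for _ in range(5)]
for col in (1, 2, 3):
    blinker[2][col] = True  # horizontal bar in the middle row
next_gen = run(blinker)
assert [row[2] for row in next_gen[1:4]] == [True, True, True]  # now a vertical bar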
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = """__DUMMY_TRANSFORMERS_USER__"""
CI_HUB_USER_FULL_NAME = """Dummy User"""
CI_HUB_USER_TOKEN = """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"""
CI_HUB_ENDPOINT = """https://hub-ci.huggingface.co"""
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
CI_HUB_TOKEN_PATH = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def _snake_case (__lowercase):
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , __lowercase)
@pytest.fixture
def _snake_case (__lowercase):
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , __lowercase)
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , __lowercase)
@pytest.fixture
def _snake_case (__lowercase):
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , __lowercase)
@pytest.fixture
def _snake_case (__lowercase , __lowercase):
HfFolder.save_token(__lowercase)
yield
HfFolder.delete_token()
@pytest.fixture(scope='session')
def _snake_case ():
return HfApi(endpoint=__lowercase)
@pytest.fixture(scope='session')
def _snake_case (__lowercase):
UpperCamelCase_ = HfFolder.get_token()
HfFolder.save_token(__lowercase)
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__lowercase)
@pytest.fixture
def _snake_case (__lowercase):
def _cleanup_repo(__lowercase):
hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset')
return _cleanup_repo
@pytest.fixture
def _snake_case (__lowercase):
@contextmanager
def _temporary_repo(__lowercase):
try:
yield repo_id
finally:
cleanup_repo(__lowercase)
return _temporary_repo
@pytest.fixture(scope='session')
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = f"""repo_txt_data-{int(time.time() * 10e3)}"""
UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase)
hf_api.upload_file(
token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data/text_data.txt' , repo_id=__lowercase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset')
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _snake_case (__lowercase , __lowercase , __lowercase):
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session')
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = f"""repo_zipped_txt_data-{int(time.time() * 10e3)}"""
UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase)
hf_api.upload_file(
token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data.zip' , repo_id=__lowercase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset')
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _snake_case (__lowercase , __lowercase , __lowercase):
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session')
def _snake_case (__lowercase , __lowercase , __lowercase):
UpperCamelCase_ = f"""repo_zipped_img_data-{int(time.time() * 10e3)}"""
UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase)
hf_api.upload_file(
token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data.zip' , repo_id=__lowercase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset')
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def _snake_case (__lowercase , __lowercase , __lowercase):
return hf_private_dataset_repo_zipped_img_data_
| 23 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """Configuration class for Funnel Transformer models."""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 14 | 0 |
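A brief usage illustration of the two derived properties above (assumes the transformers package is installed): the layer count is computed from block_sizes rather than set directly.

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
print(config.num_hidden_layers)  # 12, i.e. sum([4, 4, 4])
print(config.num_blocks)  # 3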
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : List[str] = XGLMTokenizer
__lowercase : Union[str, Any] = XGLMTokenizerFast
__lowercase : Optional[int] = True
__lowercase : List[str] = True
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = '''<pad>'''
__snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1008 )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = XGLMTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__snake_case = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(__SCREAMING_SNAKE_CASE , f.name )
__snake_case = XGLMTokenizer(f.name , keep_accents=__SCREAMING_SNAKE_CASE )
__snake_case = pickle.dumps(__SCREAMING_SNAKE_CASE )
pickle.loads(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = '''I was born in 92000, and this is falsé.'''
__snake_case = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__snake_case = self.get_rust_tokenizer()
__snake_case = tokenizer.encode(__SCREAMING_SNAKE_CASE )
__snake_case = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@slow
def lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case = '''Hello World!'''
__snake_case = [2, 3_1227, 4447, 35]
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
__snake_case = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__snake_case = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) )
@slow
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = {
'''input_ids''': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''facebook/xglm-564M''' , padding=__SCREAMING_SNAKE_CASE , )
| 24 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class for MobileNetV1 models."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 14 | 0 |
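For intuition, depth_multiplier above scales every layer's channel count, with min_depth as the floor. The exact rounding lives in the model code; the sketch below uses the common TensorFlow-style rule and is an assumption, not the library's implementation.

def scaled_channels(channels: int, depth_multiplier: float, min_depth: int = 8, divisor: int = 8) -> int:
    # Scale, round to a multiple of `divisor`, never drop below `min_depth`.
    return max(min_depth, int(channels * depth_multiplier + divisor / 2) // divisor * divisor)

print(scaled_channels(256, 0.75))  # 192
print(scaled_channels(32, 0.25))  # 8, clamped by min_depth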
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 25 |
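Using the function above, the automorphic numbers below 1000 (e.g. 76**2 = 5776 ends in 76):

print([n for n in range(1000) if is_automorphic_number(n)])
# [0, 1, 5, 6, 25, 76, 376, 625]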
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MESSAGE)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE)
    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 14 | 0 |
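A quick check with the classic example DNI: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".

print(is_spain_national_id("12345678Z"))  # True
print(is_spain_national_id("12345678-Z"))  # True, dashes are stripped first
print(is_spain_national_id("12345678A"))  # False, wrong control letter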
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drops fairseq bookkeeping keys that have no PyTorch equivalent."""
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Builds a bias-free linear layer that shares the embedding's weights."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    """Maps fairseq parameter names onto the Transformers NLLB-MoE naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""", f'''ffn.experts.expert_{expert_idx}''')
            else:
                key = key.replace("""moe_layer.experts.""", """ffn.experts.expert_""")
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""", """.ffn.router.classifier""")
        if "fc2" in key and "experts" not in key:
            key = key.replace(""".fc2.""", """.ffn.fc2.""")
        if "fc1" in key and "experts" not in key:
            key = key.replace(""".fc1.""", """.ffn.fc1.""")
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""", """.cross_attention.""")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""", """cross_attention_layer_norm""")
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""", """ff_layer_norm""")
        new_dict[key] = state_dict[old_key]
    return new_dict
def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ) -> Dict:
"""simple docstring"""
__snake_case : Optional[int] = []
__snake_case : Dict = 0
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
for expert in range(_lowerCamelCase ):
__snake_case : Tuple = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowerCamelCase ):
__snake_case : Dict = torch.load(_lowerCamelCase )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[Any] = os.path.join(
_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
torch.save(_lowerCamelCase , _lowerCamelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_lowerCamelCase )[0]].dtype )
# Add the last block
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{len(_lowerCamelCase )+1:05d}-of-???.bin''' ) )
__snake_case : str = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_lowerCamelCase )
__snake_case : Optional[Any] = rename_fairseq_keys(_lowerCamelCase , _lowerCamelCase )
__snake_case : List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowerCamelCase ) == 1:
__snake_case : Optional[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
torch.save(_lowerCamelCase , _lowerCamelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowerCamelCase , _lowerCamelCase )
# Otherwise, let's build the index
__snake_case : Tuple = {}
for idx, shard in enumerate(_lowerCamelCase ):
__snake_case : Any = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowerCamelCase ):05d}.bin''' )
__snake_case : int = os.path.join(_lowerCamelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
for key in shard:
__snake_case : str = shard_file
# Add the metadata
__snake_case : Optional[Any] = {"""total_size""": total_size}
__snake_case : int = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" ) as f:
__snake_case : Union[str, Any] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
f.write(_lowerCamelCase )
return metadata, index
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCamelCase = parser.parse_args()
__UpperCamelCase , __UpperCamelCase = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCamelCase = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCamelCase = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 26 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
| 14 | 0 |
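A smaller deterministic-input run of the counting quicksort above; the comparison count varies between runs because pivots are drawn at random.

data = [3, 7, 1, 9, 4]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
print(data)  # [1, 3, 4, 7, 9]
print(comparisons)  # e.g. 8, depends on the random pivots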
def odd_even_sort(input_list: list) -> list:
    """Sorts a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while not is_sorted:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 27 |
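Each pass above compares only even-indexed or only odd-indexed pairs, which is what makes the algorithm easy to parallelise; a quick usage check:

print(odd_even_sort([5, 3, 1, 4, 2]))  # [1, 2, 3, 4, 5]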
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = MgpstrTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def lowercase__( __UpperCamelCase: Tuple ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self: Optional[Any] ,__UpperCamelCase: Tuple ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('"test requires Fairseq"' )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self: Any ,__UpperCamelCase: Optional[int] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('"test requires transformers"' )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def lowercase__( __UpperCamelCase: Optional[int] ):
"""simple docstring"""
@wraps(__UpperCamelCase )
def wrapper(self: Optional[int] ,__UpperCamelCase: Tuple ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('"test not supported on Windows"' )
else:
test_case(self ,__UpperCamelCase )
return wrapper
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('./metrics/*/' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@local
class _a ( parameterized.TestCase ):
'''simple docstring'''
A : Any = {}
A : List[Any] = None
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = '[...]'
SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics', A ) ).module_path )
SCREAMING_SNAKE_CASE : int = datasets.load.import_main_class(metric_module.__name__, dataset=A )
# check parameters
SCREAMING_SNAKE_CASE : int = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(A, metric_module.__name__ ):
with self.use_local_metrics():
try:
SCREAMING_SNAKE_CASE : Any = doctest.testmod(A, verbose=A, raise_on_error=A )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@slow
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = '[...]'
SCREAMING_SNAKE_CASE : str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('metrics', A ) ).module_path )
# run doctest
with self.use_local_metrics():
SCREAMING_SNAKE_CASE : Tuple = doctest.testmod(A, verbose=A, raise_on_error=A )
self.assertEqual(results.failed, 0 )
self.assertGreater(results.attempted, 1 )
@contextmanager
def UpperCamelCase_ ( self, A, A ):
'''simple docstring'''
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](A ):
yield
else:
yield
@contextmanager
def UpperCamelCase_ ( self ):
'''simple docstring'''
def load_local_metric(A, *A, **A ):
return load_metric(os.path.join('metrics', A ), *A, **A )
with patch('datasets.load_metric' ) as mock_load_metric:
SCREAMING_SNAKE_CASE : Tuple = load_local_metric
yield
@classmethod
def UpperCamelCase_ ( cls, A ):
'''simple docstring'''
def wrapper(A ):
SCREAMING_SNAKE_CASE : str = contextmanager(A )
SCREAMING_SNAKE_CASE : int = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt' )
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('sv' ,'' ,'' ) # handle pytest cli flags
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
assert len(input_dict['input_ids'] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('bleurt.score._create_predictor' ) as mock_create_predictor:
SCREAMING_SNAKE_CASE : Optional[Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('bertscore' )
def lowercase__( __UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
import torch
def bert_cos_score_idf(__UpperCamelCase: int ,__UpperCamelCase: List[Any] ,*__UpperCamelCase: Optional[Any] ,**__UpperCamelCase: Dict ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('bert_score.scorer.get_model' ), patch(
'bert_score.scorer.bert_cos_score_idf' ) as mock_bert_cos_score_idf:
SCREAMING_SNAKE_CASE : str = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('comet' )
def lowercase__( __UpperCamelCase: Dict ):
"""simple docstring"""
def load_from_checkpoint(__UpperCamelCase: Any ):
class _a :
'''simple docstring'''
def UpperCamelCase_ ( self, A, *A, **A ):
'''simple docstring'''
assert len(A ) == 2
SCREAMING_SNAKE_CASE : Optional[int] = [0.19, 0.92]
return scores, sum(A ) / len(A )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch('comet.download_model' ) as mock_download_model:
SCREAMING_SNAKE_CASE : Any = None
with patch('comet.load_from_checkpoint' ) as mock_load_from_checkpoint:
SCREAMING_SNAKE_CASE : Dict = load_from_checkpoint
yield
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = load_metric(os.path.join('metrics' ,'seqeval' ) )
SCREAMING_SNAKE_CASE : List[Any] = 'ERROR'
SCREAMING_SNAKE_CASE : Union[str, Any] = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
| 28 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
        # make sure private variable is not incorrectly saved
        dict_as_saved = json.loads(config.to_json_string())
        self.assertTrue("_processor_class" not in dict_as_saved)
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list containing one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 29 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Check that every node stores a float and the BST ordering invariant holds."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
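
# Minimal usage sketch (hypothetical three-node trees, not part of the original
# doctests): a valid BST, then the same tree with the children swapped.
def _example_is_binary_search_tree() -> None:
    assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
    assert not is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))
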
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Cache the bound methods to reduce attribute look-ups in the loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
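
# FIFO sanity sketch (hypothetical values): puts land on one stack, gets drain
# the other, so items come back in insertion order at amortised O(1) each.
def _example_queue_by_two_stacks() -> None:
    queue = QueueByTwoStacks([1, 2])
    queue.put(3)
    assert [queue.get(), queue.get(), queue.get()] == [1, 2, 3]
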
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 30 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply iteration_step the given number of times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with four shorter segments forming a spike."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Display the snowflake with matplotlib."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
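
# Growth sketch: each step replaces every one of the n - 1 segments with four,
# so the point count follows n -> 4 * (n - 1) + 1 (4 -> 13 -> 49 here).
def _example_iterate() -> None:
    assert len(iterate(INITIAL_VECTORS, 2)) == 49
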
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 14 | 0 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
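
# Shape-only sketch (synthetic 2x2 image, hypothetical): upscaling to 4x4
# simply repeats source pixels chosen by the two ratios.
def _example_nearest_neighbour() -> None:
    tiny = np.zeros((2, 2, 3), np.uint8)
    scaler = NearestNeighbour(tiny, 4, 4)
    scaler.process()
    assert scaler.output.shape == (4, 4, 3)
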
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 31 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor(
            [[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]]
        )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
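
# Sketch of the expected entity_vocab.jsonl layout (hypothetical entry): each
# line is a JSON object such as {"id": 3, "entities": [["Japan", "en"]]},
# which load_original_entity_vocab flattens to {"en:Japan": 3}; the special
# tokens [MASK]/[PAD]/[UNK] are stored without a language prefix.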
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
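
# Quick usage sketch (hypothetical sizes): attribute_map lets the generic names
# resolve to the XGLM-specific attributes.
def _example_xglm_config() -> None:
    config = XGLMConfig(num_layers=2, attention_heads=4, d_model=128)
    assert config.num_hidden_layers == 2 and config.hidden_size == 128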
| 32 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 14 | 0 |
lowerCamelCase__ : List[Any] = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 33 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
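
# Hypothetical usage sketch (requires an `ffmpeg` binary on PATH; the file
# name is made up): decode an audio file's raw bytes to a float32 waveform.
def _example_ffmpeg_read(path: str = "sample.flac") -> np.ndarray:
    with open(path, "rb") as f:
        return ffmpeg_read(f.read(), sampling_rate=16_000)
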
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
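
# Chunking illustration (synthetic bytes, hypothetical): with chunk_len=4 and
# stride=(1, 1), consecutive chunks overlap by two bytes and the leftover tail
# is emitted as a final short chunk.
def _example_chunk_bytes_iter() -> None:
    chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
    assert chunks[0] == {"raw": b"abcd", "stride": (0, 1)}
    assert chunks[1] == {"raw": b"cdef", "stride": (1, 1)}
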
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 14 | 0 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 34 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| 14 | 0 |
def min_path_sum(grid: list) -> int:
    """Find the cheapest top-left to bottom-right path, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
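
# Worked example (hypothetical grid): the cheapest monotone path through
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] costs 1 + 3 + 1 + 1 + 1 = 7.
def _example_min_path_sum() -> None:
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7
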
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc spanning ``angle`` degrees."""
    return 2 * pi * radius * (angle / 360)
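
# Sanity sketch: a 90 degree arc of a radius-10 circle is a quarter of the
# circumference, i.e. 2 * pi * 10 / 4 = 5 * pi.
def _example_arc_length() -> None:
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9
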
if __name__ == "__main__":
print(arc_length(90, 10))
| 36 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
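
# Usage sketch with made-up menu data (hypothetical, not from the original
# doctests): sort by value/weight ratio, then pack greedily under the budget.
def _example_greedy() -> None:
    menu = build_menu(["burger", "salad"], [80.0, 60.0], [40.0, 10.0])
    chosen, total_value = greedy(menu, 45.0, Things.value_weight)
    assert [thing.get_name() for thing in chosen] == ["salad"]
    assert total_value == 60.0
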
def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
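    # a quick usage sketch of the automaton class defined above (positions are 0-based):
    print(A__(["what", "hat", "ver", "er"]).search_in("whatever, err ... , wherever"))
    # -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}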
| 37 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
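        # drop_rate and drop_path_rate are zeroed so the stochastic-depth paths stay deterministic in tests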
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
A_ , A_ , A_ : Union[str, Any] = False, False, False
@dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ = None
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = None
# Automatically constructed
lowerCamelCase__ = "dict"
lowerCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCamelCase__ = field(default='''Audio''' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
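    # Arrow-level storage is a struct of {bytes, path}; decoding to float arrays happens lazily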
def __call__( self ):
return self.pa_type
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": None, "path": value}
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
snake_case__ : Tuple = BytesIO()
sf.write(__SCREAMING_SNAKE_CASE , value["""array"""] , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith("""pcm""" ):
# "PCM" only has raw audio bytes
if value.get("""sampling_rate""" ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" )
if value.get("""bytes""" ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
snake_case__ : List[str] = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7
else:
snake_case__ : Tuple = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 3_2_7_6_7
snake_case__ : str = BytesIO(bytes() )
sf.write(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , value["""sampling_rate"""] , format="""wav""" )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" )
snake_case__ , snake_case__ : str = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err
snake_case__ : Optional[Any] = xsplitext(__SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
"""Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
"""Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """
"""You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ )
if file is None:
snake_case__ : str = token_per_repo_id or {}
snake_case__ : Tuple = path.split("""::""" )[-1]
try:
snake_case__ : str = string_to_dict(__SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )["""repo_id"""]
snake_case__ : int = token_per_repo_id[repo_id]
except (ValueError, KeyError):
snake_case__ : Dict = None
with xopen(__SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=__SCREAMING_SNAKE_CASE ) as f:
snake_case__ , snake_case__ : Optional[int] = sf.read(__SCREAMING_SNAKE_CASE )
else:
snake_case__ , snake_case__ : Tuple = sf.read(__SCREAMING_SNAKE_CASE )
snake_case__ : str = array.T
if self.mono:
snake_case__ : str = librosa.to_mono(__SCREAMING_SNAKE_CASE )
if self.sampling_rate and self.sampling_rate != sampling_rate:
snake_case__ : List[Any] = librosa.resample(__SCREAMING_SNAKE_CASE , orig_sr=__SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
snake_case__ : List[str] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __UpperCamelCase ( self ):
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if pa.types.is_string(storage.type ):
snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
snake_case__ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
snake_case__ : List[str] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
snake_case__ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ):
snake_case__ : Dict = pa.array([Audio().encode_example(__SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
snake_case__ : Tuple = storage.field("""bytes""" )
else:
snake_case__ : Any = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
snake_case__ : List[Any] = storage.field("""path""" )
else:
snake_case__ : Union[str, Any] = pa.array([None] * len(__SCREAMING_SNAKE_CASE ) , type=pa.string() )
snake_case__ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
@no_op_if_value_is_null
def path_to_bytes(__SCREAMING_SNAKE_CASE ):
with xopen(__SCREAMING_SNAKE_CASE , """rb""" ) as f:
snake_case__ : int = f.read()
return bytes_
snake_case__ : Optional[int] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
snake_case__ : Optional[Any] = pa.array(
[os.path.basename(__SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
snake_case__ : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(__SCREAMING_SNAKE_CASE , self.pa_type )
| 38 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
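    # MaskFormer pairs a Swin backbone with a DETR-style transformer decoder on top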
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 0 |
lowerCAmelCase_ = range(2, 20 + 1)
lowerCAmelCase_ = [10**k for k in range(ks[-1] + 1)]
lowerCAmelCase_ = {}
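# memo appears to cache, keyed by digitsum(b), a {c: [(diff, dn, k), ...]} table of precomputed jumps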
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) )
snake_case_ = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) ) )
snake_case_, snake_case_ = 0, 0
snake_case_ = n - i
snake_case_ = memo.get(SCREAMING_SNAKE_CASE__ )
if sub_memo is not None:
snake_case_ = sub_memo.get(SCREAMING_SNAKE_CASE__ )
if jumps is not None and len(SCREAMING_SNAKE_CASE__ ) > 0:
# find and make the largest jump without going over
snake_case_ = -1
for _k in range(len(SCREAMING_SNAKE_CASE__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case_ = _k
break
if max_jump >= 0:
snake_case_, snake_case_, snake_case_ = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case_ = diff + c
for j in range(min(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ) ):
snake_case_, snake_case_ = divmod(SCREAMING_SNAKE_CASE__ , 10 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = []
else:
snake_case_ = {c: []}
snake_case_ = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case_, snake_case_ = next_term(SCREAMING_SNAKE_CASE__ , k - 1 , i + dn , SCREAMING_SNAKE_CASE__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case_, snake_case_ = compute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i + dn , SCREAMING_SNAKE_CASE__ )
diff += _diff
dn += terms_jumped
snake_case_ = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case_ = 0
while j < len(SCREAMING_SNAKE_CASE__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE__ , (diff, dn, k) )
return (diff, dn)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE__ ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case_ = i
snake_case_, snake_case_, snake_case_ = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case_ = ds_c + ds_b
diff += addend
snake_case_ = 0
for j in range(SCREAMING_SNAKE_CASE__ ):
snake_case_ = a_i[j] + addend
snake_case_, snake_case_ = divmod(SCREAMING_SNAKE_CASE__ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return diff, i - start_i
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
for j in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
snake_case_ = digits[j] + addend
if s >= 10:
snake_case_, snake_case_ = divmod(SCREAMING_SNAKE_CASE__ , 10 )
snake_case_ = addend // 10 + quotient
else:
snake_case_ = s
snake_case_ = addend // 10
if addend == 0:
break
while addend > 0:
snake_case_, snake_case_ = divmod(SCREAMING_SNAKE_CASE__ , 10 )
digits.append(SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 10**15 ):
snake_case_ = [1]
snake_case_ = 1
snake_case_ = 0
while True:
snake_case_, snake_case_ = next_term(SCREAMING_SNAKE_CASE__ , 20 , i + dn , SCREAMING_SNAKE_CASE__ )
dn += terms_jumped
if dn == n - i:
break
snake_case_ = 0
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(f"""{solution() = }""") | 39 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
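        # fairseq_offset shifts raw SentencePiece ids to leave room for the special tokens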
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCAmelCase_ ( a__ ):
UpperCAmelCase__ : Dict = "bert"
def __init__( self, SCREAMING_SNAKE_CASE_=3_0522, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.02, SCREAMING_SNAKE_CASE_=1e-12, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> Optional[Any]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = vocab_size
UpperCamelCase : Tuple = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : str = num_attention_heads
UpperCamelCase : str = hidden_act
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : List[str] = hidden_dropout_prob
UpperCamelCase : Optional[Any] = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = max_position_embeddings
UpperCamelCase : List[str] = type_vocab_size
UpperCamelCase : Union[str, Any] = initializer_range
UpperCamelCase : int = layer_norm_eps
UpperCamelCase : str = position_embedding_type
UpperCamelCase : List[str] = use_cache
UpperCamelCase : Union[str, Any] = classifier_dropout
class lowerCAmelCase_ ( a__ ):
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCamelCase : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase : Optional[int] = {0: 'batch', 1: 'sequence'}
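        # dynamic axes let the exported ONNX graph accept any batch size and sequence length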
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 40 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
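        # "##"-prefixed entries are WordPiece subword continuations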
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase__ = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = 'albert'
def __init__( self : Any ,lowercase__ : Any=3_0_0_0_0 ,lowercase__ : Dict=1_2_8 ,lowercase__ : List[Any]=4_0_9_6 ,lowercase__ : Tuple=1_2 ,lowercase__ : Dict=1 ,lowercase__ : List[str]=6_4 ,lowercase__ : Dict=1_6_3_8_4 ,lowercase__ : List[Any]=1 ,lowercase__ : List[str]="gelu_new" ,lowercase__ : Optional[Any]=0 ,lowercase__ : Optional[Any]=0 ,lowercase__ : Optional[int]=5_1_2 ,lowercase__ : Union[str, Any]=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : Dict=1e-1_2 ,lowercase__ : List[str]=0.1 ,lowercase__ : List[Any]="absolute" ,lowercase__ : Union[str, Any]=0 ,lowercase__ : Optional[Any]=2 ,lowercase__ : List[Any]=3 ,**lowercase__ : Dict ,):
super().__init__(pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,**lowercase__ )
__lowercase = vocab_size
__lowercase = embedding_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_hidden_groups
__lowercase = num_attention_heads
__lowercase = inner_group_num
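        # ALBERT shares weights across layer groups; num_hidden_groups and inner_group_num control that factorization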
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = classifier_dropout_prob
__lowercase = position_embedding_type
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
if self.task == "multiple-choice":
__lowercase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 41 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : Dict = '''ZinengTang/tvlt-base'''
_a : List[str] = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Optional[int] = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : Union[str, Any] = np.ones([1_2_0_0_0] )
_a : Dict = feature_extractor(_a , return_tensors='''np''' )
_a : Tuple = processor(audio=_a , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors='''np''')
        input_processor = processor(images=images, return_tensors='''np''')

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''',
        )
| 14 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    '''simple docstring'''

    def __init__(self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because pytorch-lightning requires a forward method
    def forward(self):
        '''simple docstring'''
        pass
def convert_longformer_qa_checkpoint_to_pytorch(longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')
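# Example invocation (illustrative; the script name and all paths are placeholders):
#     python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#         --pytorch_dump_folder_path ./converted_model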
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 42 |
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(F"""1/{temp + 1}""" if series else '''1''')
    return series
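# Sanity check (illustrative): the series is built as string fractions, e.g.
#     harmonic_series("5")  # -> ['1', '1/2', '1/3', '1/4', '1/5']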
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 14 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
_lowercase : List[str] = '''layoutlmv3'''
def __init__( self: Optional[Any] , UpperCamelCase_: Union[str, Any]=50_265 , UpperCamelCase_: Tuple=768 , UpperCamelCase_: int=12 , UpperCamelCase_: List[str]=12 , UpperCamelCase_: List[Any]=3_072 , UpperCamelCase_: Any="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: str=0.1 , UpperCamelCase_: str=512 , UpperCamelCase_: Optional[Any]=2 , UpperCamelCase_: str=0.02 , UpperCamelCase_: int=1E-5 , UpperCamelCase_: List[Any]=1 , UpperCamelCase_: List[str]=0 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=1_024 , UpperCamelCase_: List[str]=128 , UpperCamelCase_: List[str]=128 , UpperCamelCase_: Tuple=True , UpperCamelCase_: Any=32 , UpperCamelCase_: Any=128 , UpperCamelCase_: Optional[int]=64 , UpperCamelCase_: str=256 , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[int]=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Optional[int]=224 , UpperCamelCase_: Any=3 , UpperCamelCase_: int=16 , UpperCamelCase_: List[Any]=None , **UpperCamelCase_: List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
vocab_size=UpperCamelCase_ , hidden_size=UpperCamelCase_ , num_hidden_layers=UpperCamelCase_ , num_attention_heads=UpperCamelCase_ , intermediate_size=UpperCamelCase_ , hidden_act=UpperCamelCase_ , hidden_dropout_prob=UpperCamelCase_ , attention_probs_dropout_prob=UpperCamelCase_ , max_position_embeddings=UpperCamelCase_ , type_vocab_size=UpperCamelCase_ , initializer_range=UpperCamelCase_ , layer_norm_eps=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = max_ad_position_embeddings
lowercase__ = coordinate_size
lowercase__ = shape_size
lowercase__ = has_relative_attention_bias
lowercase__ = rel_pos_bins
lowercase__ = max_rel_pos
lowercase__ = has_spatial_attention_bias
lowercase__ = rel_ad_pos_bins
lowercase__ = max_rel_ad_pos
lowercase__ = text_embed
lowercase__ = visual_embed
lowercase__ = input_size
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = classifier_dropout
class _a ( UpperCamelCase__ ):
_lowercase : List[str] = version.parse('''1.12''' )
@property
def lowerCamelCase_ ( self: int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def lowerCamelCase_ ( self: Any ) -> float:
"""simple docstring"""
return 1E-5
@property
def lowerCamelCase_ ( self: str ) -> int:
"""simple docstring"""
return 12
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: "ProcessorMixin" , UpperCamelCase_: int = -1 , UpperCamelCase_: int = -1 , UpperCamelCase_: bool = False , UpperCamelCase_: Optional["TensorType"] = None , UpperCamelCase_: int = 3 , UpperCamelCase_: int = 40 , UpperCamelCase_: int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , '''apply_ocr''' , UpperCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ = processor.tokenizer.num_special_tokens_to_add(UpperCamelCase_ )
lowercase__ = compute_effective_axis_dimension(
UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
lowercase__ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
lowercase__ = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
lowercase__ = self._generate_dummy_images(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = dict(
processor(
UpperCamelCase_ , text=UpperCamelCase_ , boxes=UpperCamelCase_ , return_tensors=UpperCamelCase_ , ) )
return inputs
| 43 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Optional[int] ) -> Dict:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[int] ,__a : int ,__a : List[str]="attention" ) -> List[str]:
"""simple docstring"""
_a : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Tuple = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
_a : Union[str, Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Any = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
_a : Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Union[str, Any] ,__a : List[Any] ,__a : Any=False ) -> Any:
"""simple docstring"""
if split_mlp_wi:
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : List[str] = (wi_a, wi_a)
else:
_a : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Optional[int] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __UpperCAmelCase ( __a : List[Any] ,__a : Optional[Any] ,__a : Union[str, Any] ,__a : str ) -> List[str]:
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __UpperCAmelCase ( __a : dict ,*, __a : int ,__a : bool ,__a : bool = False ) -> Any:
"""simple docstring"""
_a : Dict = traverse_util.flatten_dict(variables['''target'''] )
_a : Any = {'''/'''.join(__a ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : Optional[int] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' ,__a )
_a : Tuple = collections.OrderedDict()
# Shared embeddings.
_a : Any = old['''token_embedder/embedding''']
# Encoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Optional[Any] = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_attention_layer_norm''' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(__a ,__a ,'''encoder''' ,'''attention''' )
_a : List[str] = layer_norm
_a : Optional[Any] = k.T
_a : str = o.T
_a : List[Any] = q.T
_a : Tuple = v.T
# Block i, layer 1 (MLP).
_a : str = tax_layer_norm_lookup(__a ,__a ,'''encoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Any = tax_mlp_lookup(__a ,__a ,'''encoder''' ,__a )
_a : str = layer_norm
if split_mlp_wi:
_a : List[Any] = wi[0].T
_a : Any = wi[1].T
else:
_a : Any = wi.T
_a : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Dict = tax_relpos_bias_lookup(
__a ,__a ,'''encoder''' ).T
_a : List[str] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
_a : List[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''encoder''' ).T
_a : Optional[Any] = tax_relpos_bias_lookup(
__a ,0 ,'''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(__a ):
# Block i, layer 0 (Self Attention).
_a : Union[str, Any] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
_a , _a , _a , _a : Optional[Any] = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''self_attention''' )
_a : Optional[Any] = layer_norm
_a : Dict = k.T
_a : str = o.T
_a : str = q.T
_a : List[str] = v.T
# Block i, layer 1 (Cross Attention).
_a : Any = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
_a , _a , _a , _a : str = tax_attention_lookup(__a ,__a ,'''decoder''' ,'''encoder_decoder_attention''' )
_a : Optional[Any] = layer_norm
_a : Optional[int] = k.T
_a : Dict = o.T
_a : str = q.T
_a : int = v.T
# Block i, layer 2 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(__a ,__a ,'''decoder''' ,'''pre_mlp_layer_norm''' )
_a , _a : Tuple = tax_mlp_lookup(__a ,__a ,'''decoder''' ,__a )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : List[Any] = wi[1].T
else:
_a : Dict = wi.T
_a : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Tuple = tax_relpos_bias_lookup(__a ,__a ,'''decoder''' ).T
_a : Tuple = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : Any = old['''decoder/logits_dense/kernel'''].T
return new
def __UpperCAmelCase ( __a : Dict ,__a : bool ) -> Tuple:
"""simple docstring"""
_a : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Any = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Optional[int] = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
_a : str = state_dict['''shared.weight''']
return state_dict
def __UpperCAmelCase ( __a : List[str] ,__a : Union[str, Any] ,__a : Dict ,__a : Union[str, Any] ,__a : List[Any] ) -> int:
"""simple docstring"""
_a : List[str] = checkpoints.load_tax_checkpoint(__a )
_a : str = convert_tax_to_pytorch(
__a ,num_layers=config.num_layers ,is_encoder_only=__a ,scalable_attention=__a )
_a : str = make_state_dict(__a ,__a )
model.load_state_dict(__a ,strict=__a )
def __UpperCAmelCase ( __a : List[Any] ,__a : Any ,__a : Union[str, Any] ,__a : bool = False ,__a : bool = False ,) -> Optional[Any]:
"""simple docstring"""
_a : List[str] = MTaConfig.from_json_file(__a )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(__a )
else:
_a : Tuple = UMTaForConditionalGeneration(__a )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__a ,__a ,__a ,__a ,__a )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__a )
# Verify that we can load the checkpoint.
model.from_pretrained(__a )
print('''Done''' )
if __name__ == "__main__":
a__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
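# Example invocation (illustrative; the script name and all paths are placeholders):
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path ./converted \
#         --scalable_attention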
| 14 | 0 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """simple docstring"""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
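# Cross-check sketch (illustrative, not from the original file): count the same
# quantity per tile length with the classic f(n) = f(n-1) + f(n-k) recurrence,
# subtract the all-black row, and compare with solution() above.
def _ways_one_colour(n: int, k: int) -> int:
    f = [1] + [0] * n  # f[0] = 1: the empty suffix
    for m in range(1, n + 1):
        f[m] = f[m - 1] + (f[m - k] if m >= k else 0)
    return f[n] - 1  # drop the tiling that uses no coloured tile


def _cross_check(length: int = 50) -> bool:
    return sum(_ways_one_colour(length, k) for k in (2, 3, 4)) == solution(length)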
if __name__ == "__main__":
print(f'''{solution() = }''') | 44 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = '''Usage of script: script_name <size_of_canvas:int>'''

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
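# Headless sanity run (illustrative): exercise create_canvas/seed/run above
# without matplotlib by evolving a small random board a few generations.
def _demo(size: int = 10, steps: int = 3) -> None:
    board = create_canvas(size)
    seed(board)
    for _ in range(steps):
        board = run(board)
    print('alive cells after', steps, 'steps:', sum(cell for row in board for cell in row))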
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 14 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
def __init__( self :str , lowerCamelCase__ :Distribution , lowerCamelCase__ :Any=None , lowerCamelCase__ :List[Any]=None , lowerCamelCase__ :List[str]=0 ):
UpperCamelCase__ :int = 1.0 if scale is None else scale
UpperCamelCase__ :Union[str, Any] = 0.0 if loc is None else loc
super().__init__(lowerCamelCase__ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase__ )] )
@property
def __a ( self :Any ):
return self.base_dist.mean * self.scale + self.loc
@property
def __a ( self :List[Any] ):
return self.base_dist.variance * self.scale**2
@property
def __a ( self :Optional[int] ):
return self.variance.sqrt()
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self :int , lowerCamelCase__ :int , lowerCamelCase__ :Dict[str, int] , lowerCamelCase__ :Callable[..., Tuple[torch.Tensor]] , **lowerCamelCase__ :Any ):
super().__init__(**lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = args_dim
UpperCamelCase__ :Dict = nn.ModuleList([nn.Linear(lowerCamelCase__ , lowerCamelCase__ ) for dim in args_dim.values()] )
UpperCamelCase__ :List[str] = domain_map
def __a ( self :Tuple , lowerCamelCase__ :torch.Tensor ):
UpperCamelCase__ :Optional[Any] = [proj(lowerCamelCase__ ) for proj in self.proj]
return self.domain_map(*lowerCamelCase__ )
class lowerCAmelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self :Dict , lowerCamelCase__ :Optional[int] ):
super().__init__()
UpperCamelCase__ :Union[str, Any] = function
def __a ( self :int , lowerCamelCase__ :Union[str, Any] , *lowerCamelCase__ :Tuple ):
return self.function(lowerCamelCase__ , *lowerCamelCase__ )
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : type
_snake_case : int
_snake_case : Dict[str, int]
def __init__( self :Tuple , lowerCamelCase__ :int = 1 ):
UpperCamelCase__ :str = dim
UpperCamelCase__ :List[str] = {k: dim * self.args_dim[k] for k in self.args_dim}
def __a ( self :Union[str, Any] , lowerCamelCase__ :Union[str, Any] ):
if self.dim == 1:
return self.distribution_class(*lowerCamelCase__ )
else:
return Independent(self.distribution_class(*lowerCamelCase__ ) , 1 )
def __a ( self :Optional[int] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[torch.Tensor] = None , lowerCamelCase__ :Optional[torch.Tensor] = None , ):
UpperCamelCase__ :Union[str, Any] = self._base_distribution(lowerCamelCase__ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase__ , loc=lowerCamelCase__ , scale=lowerCamelCase__ , event_dim=self.event_dim )
@property
def __a ( self :Optional[Any] ):
return () if self.dim == 1 else (self.dim,)
@property
def __a ( self :List[str] ):
return len(self.event_shape )
@property
def __a ( self :Any ):
return 0.0
def __a ( self :Any , lowerCamelCase__ :int ):
return ParameterProjection(
in_features=lowerCamelCase__ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __a ( self :Union[str, Any] , *lowerCamelCase__ :torch.Tensor ):
raise NotImplementedError()
@staticmethod
def __a ( lowerCamelCase__ :torch.Tensor ):
return (x + torch.sqrt(torch.square(lowerCamelCase__ ) + 4.0 )) / 2.0
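# Note (illustrative): the squareplus map above sends any real x to a strictly
# positive value, which is how unconstrained network outputs become valid
# scale / df / total_count parameters, e.g.:
#     x = torch.randn(8)
#     positive = (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0  # all entries > 0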
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
_snake_case : type = StudentT
@classmethod
def __a ( cls :List[str] , lowerCamelCase__ :torch.Tensor , lowerCamelCase__ :torch.Tensor , lowerCamelCase__ :torch.Tensor ):
UpperCamelCase__ :Dict = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCamelCase__ :Dict = 2.0 + cls.squareplus(lowerCamelCase__ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict[str, int] = {"loc": 1, "scale": 1}
_snake_case : type = Normal
@classmethod
def __a ( cls :Union[str, Any] , lowerCamelCase__ :torch.Tensor , lowerCamelCase__ :torch.Tensor ):
UpperCamelCase__ :Tuple = cls.squareplus(lowerCamelCase__ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowerCAmelCase_ ( lowercase ):
"""simple docstring"""
_snake_case : Dict[str, int] = {"total_count": 1, "logits": 1}
_snake_case : type = NegativeBinomial
@classmethod
def __a ( cls :Tuple , lowerCamelCase__ :torch.Tensor , lowerCamelCase__ :torch.Tensor ):
UpperCamelCase__ :Union[str, Any] = cls.squareplus(lowerCamelCase__ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __a ( self :Optional[Any] , lowerCamelCase__ :int ):
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase__ , logits=lowerCamelCase__ ) , 1 )
def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[torch.Tensor] = None , lowerCamelCase__ :Optional[torch.Tensor] = None ):
UpperCamelCase__ , UpperCamelCase__ :int = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) ) | 45 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''')

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''')
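# Illustrative (hedged) use of the config above: num_hidden_layers is derived
# from block_sizes and cannot be set directly.
#     cfg = FunnelConfig(block_sizes=[2, 2])
#     cfg.num_hidden_layers      # -> 4
#     cfg.num_hidden_layers = 6  # raises NotImplementedError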
| 14 | 0 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('''torch'''))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
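# Illustrative call (hedged: assumes ">=" is a key of STR_OPERATION_TO_FUNC,
# which the error message above implies):
#     compare_versions(parse("1.13.1"), ">=", "1.12")  # -> True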
def is_torch_version(operation: str, version: str) -> bool:
    '''simple docstring'''
    return compare_versions(torch_version, operation, version) | 46 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """simple docstring"""

    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
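# Illustrative (hedged) use of the config above:
#     cfg = MobileNetV1Config(depth_multiplier=0.75)  # fine
#     MobileNetV1Config(depth_multiplier=0)           # raises ValueError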
class MobileNetV1OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 14 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
SCREAMING_SNAKE_CASE__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint=None):
    require_version(deps[pkg], hint)
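# Usage sketch (hedged: assumes the package name is a key of `deps`, consistent
# with the runtime list above):
#     dep_version_check("numpy")  # raises if the installed version violates the pin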
| 47 |
NUMBERS_PLUS_LETTER = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''


def is_spanish_id(spanish_id: str) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id, str):
        msg = F"""Expected string as input, found {type(spanish_id).__name__}"""
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace('''-''', '''''').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
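# Hand-checked examples (illustrative): 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == 'Z', so both spellings below validate.
#     is_spanish_id('12345678Z')   # -> True
#     is_spanish_id('12345678-Z')  # -> True (the hyphen is stripped first)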
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = 'data2vec-vision'
def __init__( self : Optional[Any] , __magic_name__ : Optional[int]=768 , __magic_name__ : Any=12 , __magic_name__ : int=12 , __magic_name__ : str=3072 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : List[Any]=0.02 , __magic_name__ : Any=1E-12 , __magic_name__ : Dict=224 , __magic_name__ : Dict=16 , __magic_name__ : List[Any]=3 , __magic_name__ : Tuple=False , __magic_name__ : Optional[int]=False , __magic_name__ : Any=False , __magic_name__ : Optional[Any]=False , __magic_name__ : List[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Tuple=True , __magic_name__ : Dict=[3, 5, 7, 11] , __magic_name__ : int=[1, 2, 3, 6] , __magic_name__ : Union[str, Any]=True , __magic_name__ : int=0.4 , __magic_name__ : int=256 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : Union[str, Any]=False , __magic_name__ : Optional[int]=255 , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = use_mask_token
lowerCAmelCase__ = use_absolute_position_embeddings
lowerCAmelCase__ = use_relative_position_bias
lowerCAmelCase__ = use_shared_relative_position_bias
lowerCAmelCase__ = layer_scale_init_value
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowerCAmelCase__ = out_indices
lowerCAmelCase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowerCAmelCase__ = use_auxiliary_head
lowerCAmelCase__ = auxiliary_loss_weight
lowerCAmelCase__ = auxiliary_channels
lowerCAmelCase__ = auxiliary_num_convs
lowerCAmelCase__ = auxiliary_concat_input
lowerCAmelCase__ = semantic_loss_ignore_index
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = version.parse('1.11' )
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return 1E-4
| 48 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
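# Tiny illustrative check of the two helpers above (names local to this demo):
def _demo_sort() -> None:
    data = [3, 1, 4, 1, 5, 9, 2, 6]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    print(data, 'comparisons:', comparisons)  # data is now sorted in place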
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
| 14 | 0 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total
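# Hand check (illustrative): the even Fibonacci terms not exceeding 100 are
# 2, 8 and 34, so solution(100) should return 44.
def _sanity_check() -> None:
    assert solution(100) == 44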
if __name__ == "__main__":
print(f"""{solution() = }""")
| 49 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text
    @unittest.skip('''MGP-STR always lower cases letters.''')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                special_token = '''[SPECIAL_TOKEN]'''

                tokenizer.add_special_tokens({'''cls_token''': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(''' ''', ''''''), output_text)

    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''')
    def test_pretokenized_inputs(self):
        pass
| 14 | 0 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        # show the bar only on the local main process (note: the original line
        # compared with `== 0`, which would have disabled it on the main process)
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
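# Usage sketch (hedged; follows this wrapper's signature, where the flag comes
# first and the remaining arguments are passed through to tqdm):
#     for batch in tqdm(True, dataloader, desc='train'):
#         ...
# Only the local main process renders a bar; the other processes run silently.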
| 50 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 0 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]="attention" ) -> Dict:
"""simple docstring"""
    UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
UpperCAmelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
UpperCAmelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
UpperCAmelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCAmelCase = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
UpperCAmelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ) -> List[str]:
"""simple docstring"""
if split_mlp_wi:
UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
UpperCAmelCase = (wi_a, wi_a)
else:
UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
UpperCAmelCase = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> Any:
"""simple docstring"""
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def __snake_case ( SCREAMING_SNAKE_CASE_ : dict , *, SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : bool = False ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = traverse_util.flatten_dict(variables['''target'''] )
UpperCAmelCase = {'''/'''.join(SCREAMING_SNAKE_CASE_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase = old['''token_embedder/embedding''']
# Encoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_attention_layer_norm''' )
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCAmelCase = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''encoder''' ).T
UpperCAmelCase = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
UpperCAmelCase = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE_ , 0 , '''encoder''' ).T
UpperCAmelCase = tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE_ , 0 , '''decoder''' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_self_attention_layer_norm''' )
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''self_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_cross_attention_layer_norm''' )
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = tax_attention_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''encoder_decoder_attention''' )
UpperCAmelCase = layer_norm
UpperCAmelCase = k.T
UpperCAmelCase = o.T
UpperCAmelCase = q.T
UpperCAmelCase = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , '''pre_mlp_layer_norm''' )
UpperCAmelCase, UpperCAmelCase = tax_mlp_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = layer_norm
if split_mlp_wi:
UpperCAmelCase = wi[0].T
UpperCAmelCase = wi[1].T
else:
UpperCAmelCase = wi.T
UpperCAmelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
UpperCAmelCase = tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''decoder''' ).T
UpperCAmelCase = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase = old['''decoder/logits_dense/kernel'''].T
return new
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : bool ) -> Any:
"""simple docstring"""
UpperCAmelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
UpperCAmelCase = state_dict['''shared.weight''']
return state_dict
def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
UpperCAmelCase = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = convert_tax_to_pytorch(
SCREAMING_SNAKE_CASE_ , num_layers=config.num_layers , is_encoder_only=SCREAMING_SNAKE_CASE_ , scalable_attention=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = make_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config, builds the model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Whether the model is an encoder-only model.", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scalable attention (UMT5 models).",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
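
# --- Illustration (not part of the converter above): why the kernels are
# transposed. T5X stores dense kernels as (in_features, out_features), while
# PyTorch nn.Linear weights are (out_features, in_features). The shapes below
# are made up for the example; numpy is imported at the top of this file.
example_kernel = np.zeros((512, 2048))  # hypothetical (in, out) kernel from a T5X checkpoint
example_weight = example_kernel.T       # what lands in the PyTorch state dict
assert example_weight.shape == (2048, 512)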
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Checks that every node holds a float and that the BST ordering invariant holds."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
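
# --- Usage sketch for the validator above (the function name
# is_binary_search_tree follows the inner helper's naming):
if __name__ == "__main__":
    tree = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    print(is_binary_search_tree(tree))  # True
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(3.0))))  # False: left child exceeds its parent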
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Maps each lowercase letter to its 1-based alphabet position (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode()."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
    main()
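
# --- Round-trip sanity check for the cipher above ---
assert encode("abc") == [1, 2, 3]
assert decode([8, 5, 12, 12, 15]) == "hello"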
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Runs the Koch construction `steps` times on the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replaces every segment with four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotates a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Plots the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
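
    # Quick structural check: each iteration turns every segment into four, so
    # after n steps the curve has 3 * 4**n segments, plus one point to close it:
    assert len(iterate(INITIAL_VECTORS, 2)) == 3 * 4**2 + 1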
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolates the decimal part of a number, optionally rounded to `digit_amount` digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
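
# --- Cross-check sketch (not in the original file): math.modf performs the
# same fraction/integer split, so the two should agree for positive inputs.
import math


def decimal_isolate_modf(number: float, digit_amount: int) -> float:
    fractional = math.modf(number)[0]  # fractional part, sign preserved
    return round(fractional, digit_amount) if digit_amount > 0 else fractional


assert decimal_isolate_modf(35.345, 2) == decimal_isolate(35.345, 2)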
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Converts an original mLUKE checkpoint into a Hugging Face LukeForMaskedLM checkpoint."""
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    state_dict = state_dict_for_hugging_face
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Parses the original JSON-lines entity vocab into a {name: id} mapping."""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
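
    # --- Self-contained check of load_original_entity_vocab (made-up data):
    # every (name, language) alias of an entry maps to the same id, while
    # special tokens are stored without a language prefix:
    #
    #     import tempfile
    #     entries = [
    #         {"id": 0, "entities": [["[MASK]", "none"]]},
    #         {"id": 1, "entities": [["Japan", "en"], ["日本", "ja"]]},
    #     ]
    #     with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
    #         f.write("\n".join(json.dumps(e, ensure_ascii=False) for e in entries))
    #     load_original_entity_vocab(f.name)
    #     # -> {'[MASK]': 0, 'en:Japan': 1, 'ja:日本': 1}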
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Finds the d <= `digit` whose unit fraction numerator/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
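
# Worked example: the long division of 1/7 cycles through remainders
# 1, 3, 2, 6, 4, 5 before repeating (cycle length 6), the longest for any
# denominator up to 10:
assert solution(1, 10) == 7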
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
    {'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
    >>> print(results['spearmanr'])
    -0.7
    >>> print(round(results['spearmanr_pvalue'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
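
# --- Quick cross-check against scipy directly (the metric class above is a
# thin wrapper around scipy.stats.spearmanr):
if __name__ == "__main__":
    rho, p_value = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(rho, 1))  # -0.7, matching the docstring example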
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
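
# --- Usage sketch: within the transformers package this wrapper is reached
# through the public API, e.g.:
#
#     from transformers import RagTokenizer
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#     batch = tokenizer(["who holds the record in 100m freestyle"], return_tensors="pt")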
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read raw microphone data through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Reads microphone audio and rechunks it with overlapping strides for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, overlapping by `stride` on each side.
    With `stream=True`, partial chunks are yielded while a full `chunk_len` is not yet available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
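
# --- Self-contained demo of the rechunking logic in chunk_bytes_iter, feeding
# synthetic bytes instead of an ffmpeg stream. Note how consecutive chunks
# overlap by the left/right strides:
if __name__ == "__main__":
    for chunk in chunk_bytes_iter(iter([b"0123456789"]), chunk_len=4, stride=(1, 1)):
        print(chunk["raw"], chunk["stride"])
    # b'0123' (0, 1)
    # b'2345' (1, 1)
    # b'4567' (1, 1)
    # b'6789' (1, 1)
    # b'89'   (1, 0)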
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use YolosImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
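
# --- Migration sketch: the deprecated class is a pure alias, so new code can
# instantiate the processor directly:
#
#     from transformers import YolosImageProcessor
#     image_processor = YolosImageProcessor()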
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
    required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from math import factorial
class Dual:
    """A dual number with one real part and a list of dual (epsilon) coefficients."""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    """Returns the `order`-th derivative of `func` at `position`, computed via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
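
    # Worked check: f(y) = y**6, so f''(y) = 30 * y**4 and the call above
    # prints 30 * 9**4 = 196830.
    assert differentiate(f, 9, 2) == 30 * 9**4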
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """Parses 'True'/'False' command-line strings into booleans."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
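
    # --- Example invocation of this script (file names are placeholders):
    #
    #   python convert_original_controlnet_to_diffusers.py \
    #       --checkpoint_path control_sd15_canny.pth \
    #       --original_config_file cldm_v15.yaml \
    #       --dump_path ./controlnet-canny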
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the whitespace/control
    characters that byte-pair encodings choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str, str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
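
# --- Illustration: the byte-level trick above gives every byte a printable
# unicode stand-in, so BPE can operate on arbitrary text. For example:
#
#     byte_encoder = bytes_to_unicode()
#     len(byte_encoder)                                 # 256: every byte value is covered
#     "".join(byte_encoder[b] for b in b"hello world")  # 'helloĠworld' (space -> 'Ġ')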
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    """Placeholder for the doctest-based test of greedy()."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
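
    # Usage sketch: greedily fill a weight budget of 50, taking highest-value
    # items first (made-up data):
    foods = build_menu(["burger", "salad", "soda"], [80, 60, 20], [40, 30, 10])
    chosen, total_value = greedy(foods, 50, Things.get_value)
    print(chosen, total_value)  # [Things(burger, 80, 40), Things(soda, 20, 10)] 100.0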
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Lists (src, dest) key pairs mapping original MSN weight names to ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits each fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    """The projection head is only used during MSN self-supervised pre-training."""
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original MSN weights into the HF ViTMSN structure."""
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
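# Example invocation, assuming this file is saved as convert_vit_msn_to_pytorch.py
# (the script name and output directory are illustrative; the default
# --checkpoint_url points at the MSN ViT-S/16 weights):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small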
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
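    # Worked example of the shape rule checked above, using the tester defaults
    # (image_size=224, embed_dims=[48, 56, 112, 220], 8 hidden states):
    #   i = 0, 1 -> (batch, 48, 56, 56)
    #   i = 2, 3 -> (batch, 56, 28, 28)
    #   i = 4, 5 -> (batch, 112, 14, 14)
    #   i = 6, 7 -> (batch, 220, 7, 7)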
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Pre-tokenize one example and record its character-to-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
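# Illustration of ratio_char_token (numbers are made up): a 1200-character file
# that tokenizes into 400 ids gives ratio_char_token = 1200 / 400 = 3.0, i.e. each
# token covers 3 characters on average; unusually low ratios tend to flag
# hard-to-compress content such as auto-generated files.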
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
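# Example invocation (the tokenizer, dataset and target repo names below are
# illustrative, not required values):
#
#     python pretokenizing.py \
#         --tokenizer_dir codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-train \
#         --tokenized_data_repo my-org/codeparrot-train-tokenized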
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[: hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[: hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original MaskFormer weights into the HF MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help="Name of the MaskFormer model you'd like to convert",
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
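# Example invocation, assuming this file is saved as convert_maskformer_checkpoint.py
# (the script name and paths are illustrative; --checkpoint_path must point at the
# original pickled state dict, not a Transformers checkpoint):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path ./MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade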
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
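# Outside of these tests the scheduler is normally swapped into a diffusers
# pipeline. A minimal sketch (the model id is illustrative, and `torchsde` must
# be installed for this scheduler):
#
#     from diffusers import DiffusionPipeline, DPMSolverSDEScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)
#     image = pipe("an astronaut riding a horse").images[0]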
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
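# Note on the `fairseq_offset` used above (a sketch of the mechanism, not an API
# guarantee): the tokenizer reserves the lowest ids for its own special tokens
# ([PAD], [CLS], [SEP], ...), so a raw SentencePiece id is shifted by a fixed
# offset before being exposed, e.g.:
#
#     sp_id = 285                                # "▁This" in the fixture model
#     hf_id = sp_id + tokenizer.fairseq_offset   # id returned by the HF tokenizer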
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
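    # The `num_hidden_groups` passed above is what gives ALBERT its cross-layer
    # parameter sharing: all layers in a group reuse one set of weights. A quick
    # sketch of the two extremes:
    #
    #     AlbertConfig(num_hidden_layers=12, num_hidden_groups=1)   # fully shared (ALBERT default)
    #     AlbertConfig(num_hidden_layers=12, num_hidden_groups=12)  # no sharing, BERT-like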
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : List[str] = AlbertModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _A ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertForPreTraining(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , sentence_order_label=UpperCAmelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _A ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : List[Any] = AlbertForMaskedLM(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : List[str] = AlbertForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Any = AlbertForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = AlbertForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = AlbertForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : int = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
def _A ( self : Any ):
self.config_tester.run_common_tests()
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _A ( self : Tuple ):
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_ )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ )
def _A ( self : Dict ):
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ )
def _A ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE : int = type
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
@slow
def _A ( self : Optional[int] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Dict = AlbertModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 62 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
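# Round-trip WordPiece tests for the LXMERT tokenizer, which reuses the BERT vocabulary format.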
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 14 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
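# determine_framework() picks "pt" or "tf" from an explicit argument, a local checkpoint, or the installed backends.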
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"
    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)
    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 63 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
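# TvltProcessor routes audio inputs to the feature extractor and image inputs to the image processor.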
@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)
        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)
    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        images = np.ones([3, 224, 224])
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        audio = np.ones([12000])
        images = np.ones([3, 224, 224])
        inputs = processor(audio=audio, images=images)
        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
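# Each try/except below only registers a submodule when its optional dependency is installed.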
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 14 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
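# Single entry point that tokenizes text and extracts speech features for SpeechT5.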
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad with the mel-bin count so log-mel spectrogram targets pad correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
| 65 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
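# Converts a T5X (JAX) checkpoint into the Hugging Face UMT5/MT5 PyTorch layout.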
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (k, o, q, v) attention kernels of a layer, reshaped to 2D matrices."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels of a layer; v1.1 checkpoints have a gated wi_0/wi_1 pair."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    """Converts the flattened T5X parameter dict to the standard HF T5/UMT5 state dict key layout."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the model's parameters with the converted T5X ones."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 14 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
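# Builds a tiny BERT config plus random inputs so every Flax head class can be exercised quickly.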
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 66 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
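# Conway's Game of Life on a square boolean canvas, animated with matplotlib.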
usage_doc = "Usage of script: script_name <size_of_canvas:int>"
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Runs one generation of the game and returns the next canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 14 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
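# BlenderbotSmall follows the BART seq2seq layout, including its ONNX export description below.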
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
                 encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
                 activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0,
                 activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
                 pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
                         is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
                         forced_eos_token_id=forced_eos_token_id, **kwargs)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 67 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
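# Funnel compresses the sequence across blocks; num_hidden_layers is derived from block_sizes below.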
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2,
                 d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1,
                 attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None,
                 layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True,
                 truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )
    @property
    def num_blocks(self):
        return len(self.block_sizes)
    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 14 | 0 |
import torch
from transformers import AutoModel
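# Few-shot NER: scores each query token against the start/end tokens of the support entity spans.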
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)
    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state
    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)
    def Atten(self, q, S, T=1):
        return self.softmax(T * self.cos(q, S))
    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 68 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
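# depth_multiplier scales every layer's channel count (e.g. 0.75 in mobilenet_v1_0.75_192).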
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"
    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6",
                 tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001,
                 **kwargs):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 14 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
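# Same T5X-to-PyTorch conversion as above, targeting the original T5 layout with per-layer parameter paths.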
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (k, o, q, v) attention kernels of layer i."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels of layer i; v1.1 checkpoints have a gated wi_0/wi_1 pair."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale of layer i."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the flattened T5X parameter dict to the standard HF T5 state dict key layout."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the model's parameters with the converted T5X ones."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 69 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"
def is_spain_national_id(spanish_id: str) -> bool:
    """Validates a Spanish DNI: eight digits plus a checksum letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    # the valid check letter is indexed by the eight-digit number modulo 23
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
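    # Worked example of the check-letter rule above: the valid letter is
    # LOOKUP_LETTERS[number % 23]; for the classic sample DNI 12345678Z we get
    # 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")  # dashes are stripped before validation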
| 14 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 70 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
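# Small deterministic sanity check for the sorter above: sortedness always holds,
# and the first partition alone costs len(sample) - 1 comparisons, whatever
# pivots the random generator picks.
sample = [3, 1, 2, 5, 4]
comparisons = _in_place_quick_sort(sample, 0, len(sample) - 1)
assert sample == [1, 2, 3, 4, 5]
assert comparisons >= len(sample) - 1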
| 14 | 0 |
'''simple docstring'''
def binary_xor(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
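    # Quick worked example for the XOR above: 25 -> 11001 and 32 -> 100000;
    # zero-padded to a common width they XOR bitwise to 111001.
    assert binary_xor(25, 32) == "0b111001"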
| 71 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
    def test_added_tokens_do_lower_case(self):
        pass
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = '[SPECIAL_TOKEN]'

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(' ', ''), output_text)
    @unittest.skip('MGP-STR tokenizer only handles one sequence.')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
    def test_pretokenized_inputs(self):
        pass
| 14 | 0 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
_DESCRIPTION = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
_KWARGS_DESCRIPTION = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 72 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            config_tmpfile = Path(tmpdirname) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )
            json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop('image_processor_type')
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(image_processor.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'},
                open(processor_tmpfile, 'w'),
            )

            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'clip-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoImageProcessor.from_pretrained('clip-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.',
        ):
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / 'preprocessor_config.json'
                config_tmpfile = Path(tmpdirname) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'},
                    open(processor_tmpfile, 'w'),
                )
                json.dump({'model_type': 'clip'}, open(config_tmpfile, 'w'))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if 'custom' in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content['custom']
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register('custom', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
            self.assertTrue(not hasattr(image_processor, 'is_local'))
        finally:
            if 'custom' in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content['custom']
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 10_24,
}

MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}


class SpeechaTextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token='<s>',
        eos_token='</s>',
        pad_token='<pad>',
        unk_token='<unk>',
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + ' '
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))


def load_spm(path: str, sp_model_kwargs) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str):
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
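# Hypothetical round trip with the tokenizer above (the checkpoint id comes from
# PRETRAINED_VOCAB_FILES_MAP in this file; network access is assumed):
# tokenizer = SpeechaTextTokenizer.from_pretrained('facebook/s2t-small-librispeech-asr')
# ids = tokenizer('hello world').input_ids  # language prefix token + pieces + </s>
# text = tokenizer.decode(ids, skip_special_tokens=True)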
| 73 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(root: TreeNode | None) -> bool:
    """simple docstring"""
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
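    # A minimal sanity check for the validator above: a 3-node BST passes, and
    # swapping the children breaks the ordering invariant.
    valid = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
    invalid = TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))
    assert is_binary_search_tree(valid)
    assert not is_binary_search_tree(invalid)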
| 14 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 74 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect('equal')

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
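    # Worked check of one iteration step (module names as above): a single unit
    # segment grows into four segments, i.e. five points, with the middle two
    # forming the 60-degree bump.
    segment = [numpy.array([0, 0]), numpy.array([1, 0])]
    assert len(iteration_step(segment)) == 5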
| 14 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechaTextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # subtract mean and divide by standard deviation (optionally)
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
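# Minimal usage sketch for the extractor above (a synthetic 1-second, 16 kHz
# sine wave stands in for real speech; torchaudio must be installed):
# import numpy as np
# fe = SpeechaTextFeatureExtractor()
# wav = np.sin(2 * np.pi * 440 * np.arange(16_000) / 16_000).astype(np.float32)
# batch = fe(wav, sampling_rate=16_000, padding=True, return_tensors='np')
# print(batch['input_features'].shape)  # (1, num_frames, 80)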
| 75 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_b = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_b]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['@'])[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['#'])[0]

    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    enta_emb = word_emb[enta_init_index].unsqueeze(0)
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_mask_emb = entity_emb[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_embeddings.entity_embeddings.weight'] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['entity_predictions.bias']
    entity_mask_bias = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0)
    state_dict['entity_predictions.bias'] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop('entity_predictions.decoder.weight')
    state_dict.pop('lm_head.decoder.weight')
    state_dict.pop('lm_head.decoder.bias')
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('lm_head') or key.startswith('entity_predictions')):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='entity_classification')

    text = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = 'Tokyo is the capital of <mask>.'
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors='pt')

    outputs = model(**encoding)

    input_ids = encoding['input_ids'][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>'))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('en:')][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ['[MASK]', '[PAD]', '[UNK]']

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry['id']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
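# Direct (non-CLI) invocation sketch for the converter above; every path is a
# placeholder:
# convert_luke_checkpoint(
#     checkpoint_path="/path/to/pytorch_model.bin",
#     metadata_path="/path/to/metadata.json",
#     entity_vocab_path="/path/to/entity_vocab.jsonl",
#     pytorch_dump_folder_path="/path/to/output",
#     model_size="base",
# )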
| 14 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 1_00
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 1_00
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
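    # Worked example for the routine above: century anchor for 2012 is 2,
    # doomsday is (1 + 0 + 0 + 2) % 7 == 3, and with the leap-year February
    # anchor of 1 we get (3 + 29 - 1) % 7 == 3, i.e. 29 February 2012 was a
    # Wednesday.
    assert get_week_day(2012, 2, 29) == 'Wednesday'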
| 76 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 14 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img ( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def a_ ( self : Optional[int]):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
if is_vision_available()
else None
)
@slow
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
UpperCamelCase_)
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_)
# forward pass
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**UpperCamelCase_)
# verify the logits
__UpperCAmelCase : str = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : List[str] = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01]).to(UpperCamelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
@slow
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
__UpperCAmelCase : int = model.to(UpperCamelCase_)
__UpperCAmelCase : Dict = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : Optional[Any] = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_)
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**UpperCamelCase_)
__UpperCAmelCase : Tuple = outputs.logits
# verify the logits
__UpperCAmelCase : int = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape , UpperCamelCase_)
__UpperCAmelCase : List[str] = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4))
@slow
def a_ ( self : Dict):
"""simple docstring"""
__UpperCAmelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
__UpperCAmelCase : Union[str, Any] = model.to(UpperCamelCase_)
__UpperCAmelCase : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
__UpperCAmelCase : List[str] = prepare_img()
__UpperCAmelCase : Dict = image_processor(images=UpperCamelCase_ , return_tensors="pt").to(UpperCamelCase_)
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[str] = model(**UpperCamelCase_)
__UpperCAmelCase : int = outputs.logits.detach().cpu()
__UpperCAmelCase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)])
__UpperCAmelCase : Tuple = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape , UpperCamelCase_)
__UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_)
__UpperCAmelCase : Tuple = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape , UpperCamelCase_)
| 77 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
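# The command above asks ffmpeg to read any container/codec from stdin
# ("pipe:0"), downmix to one channel, resample to `ar` Hz, and write raw
# little-endian float32 PCM to stdout ("pipe:1").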
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
_a : Optional[int] = np.frombuffer(__a ,np.float32 )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
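# Usage sketch (hypothetical file): feeding the bytes of "clip.mp3" with
# sampling_rate=16_000 returns the decoded mono float32 waveform at 16 kHz.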
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
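# `format_` picks the OS capture backend (alsa / avfoundation / dshow) and
# `input_` names the default recording device for that backend.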
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
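# Worked example: at 16_000 Hz with chunk_length_s=1.0 and f32le (4 bytes per
# sample), chunk_len = 16_000 * 1 * 4 = 64_000 bytes per yielded chunk.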
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.int16
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.float32
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
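# Each yielded dict carries `stride` in samples so a downstream consumer can
# trim the overlapping context; chunks running more than ten chunk-lengths
# behind real time are skipped above rather than allowed to queue up.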
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
_a : Dict = 2**24 # 16Mo
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 14 | 0 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
SCREAMING_SNAKE_CASE_: Union[str, Any] ='__DUMMY_TRANSFORMERS_USER__'
SCREAMING_SNAKE_CASE_: Optional[Any] ='Dummy User'
SCREAMING_SNAKE_CASE_: Tuple ='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'
SCREAMING_SNAKE_CASE_: str ='https://hub-ci.huggingface.co'
SCREAMING_SNAKE_CASE_: Dict =CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}'
SCREAMING_SNAKE_CASE_: List[Any] =CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}'
SCREAMING_SNAKE_CASE_: List[str] =Path('~/.huggingface/hub_ci_token').expanduser()
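# These constants point every fixture below at the Hugging Face *CI* hub
# (hub-ci.huggingface.co) with a throwaway dummy user/token, never at the
# production endpoint.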
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : int ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , snake_case_ )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : Dict ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : Optional[int] ) -> int:
'''simple docstring'''
HfFolder.save_token(snake_case_ )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
return HfApi(endpoint=snake_case_ )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( snake_case_ : HfApi ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = HfFolder.get_token()
HfFolder.save_token(snake_case_ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(snake_case_ )
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : int ) -> Dict:
'''simple docstring'''
def _cleanup_repo(snake_case_ : str ):
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCAmelCase_ ( snake_case_ : Dict ) -> Optional[Any]:
'''simple docstring'''
@contextmanager
def _temporary_repo(snake_case_ : Optional[Any] ):
try:
yield repo_id
finally:
cleanup_repo(snake_case_ )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( snake_case_ : HfApi , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = f"""repo_txt_data-{int(time.time() * 1_0E3 )}"""
UpperCAmelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type="dataset" , private=snake_case_ )
hf_api.upload_file(
token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo="data/text_data.txt" , repo_id=snake_case_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ) -> Any:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( snake_case_ : HfApi , snake_case_ : Tuple , snake_case_ : List[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = f"""repo_zipped_txt_data-{int(time.time() * 1_0E3 )}"""
UpperCAmelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type="dataset" , private=snake_case_ )
hf_api.upload_file(
token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo="data.zip" , repo_id=snake_case_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : List[str] ) -> int:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( snake_case_ : HfApi , snake_case_ : str , snake_case_ : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = f"""repo_zipped_img_data-{int(time.time() * 1_0E3 )}"""
UpperCAmelCase_ = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(snake_case_ , token=snake_case_ , repo_type="dataset" , private=snake_case_ )
hf_api.upload_file(
token=snake_case_ , path_or_fileobj=str(snake_case_ ) , path_in_repo="data.zip" , repo_id=snake_case_ , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(snake_case_ , token=snake_case_ , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCAmelCase_ ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 78 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
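# in_channels is 9 for inpainting (an inference worth noting: 4 noisy latent
# channels + 4 channels of the VQ-encoded masked image + 1 mask channel).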
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uint8(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.float32 )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| 14 | 0 |
MOD_ADLER : int = 6_55_21
def _lowerCamelCase ( plain_text : str ) -> int:
    '''simple docstring'''
    a : int = 1
    b : int = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
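# Sanity check against the canonical Adler-32 test vector:
# assert _lowerCamelCase("Wikipedia") == 300286872  # 0x11E60398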
| 79 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool ( string : str ) -> bool:
    """simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
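# This helper exists because argparse's `type=bool` would treat any non-empty
# string, including "False", as truthy; parse_bool("False") correctly returns
# the Python False.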
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
from __future__ import annotations
def comp_and_swap( array : list[int] , index_a : int , index_b : int , direction : int ) -> None:
    '''simple docstring'''
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array : list[int] , low : int , length : int , direction : int ) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array : list[int] , low : int , length : int , direction : int ) -> None:
    '''simple docstring'''
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
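# Bitonic sort assumes the slice length is a power of two; direction == 1 sorts
# ascending and direction == 0 descending, e.g. bitonic_sort([3, 1, 4, 2], 0, 4, 1)
# rearranges the list in place to [1, 2, 3, 4].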
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
| 80 |
class Things :
    """simple docstring"""
    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def build_menu ( name , value , weight ) -> list:
    """simple docstring"""
    menu : list = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu
def greedy ( item , max_cost , key_func ) -> tuple:
    """simple docstring"""
    items_copy : list = sorted(item ,key=key_func ,reverse=True )
    result : list = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
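# Illustrative example (menu values are made up):
# greedy(build_menu(["burger", "salad"], [80, 40], [40, 10]), 50, Things.get_value)
# keeps both items within the 50-unit weight budget, for a total value of 120.0.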
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
def solution ( n : int = 1_0 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError("Invalid input" )
    modulus : int = 1_0**n
    number : int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )
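# Project Euler 97: the last `n` digits of the non-Mersenne prime
# 28433 * 2**7830457 + 1; three-argument pow keeps the arithmetic modular, so
# the full number (roughly 2.4 million digits) is never materialised.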
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(10) = }''')
| 81 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
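# Zeroing every init range/std makes the expected post-initialisation values
# deterministic (0.0, or 1.0 for scale parameters), which the loop below asserts.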
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def prepare_img ( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.dummy_uncond_unet
UpperCAmelCase_ = KarrasVeScheduler()
UpperCAmelCase_ = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(num_inference_steps=2 , generator=_UpperCAmelCase , output_type="numpy" , return_dict=_UpperCAmelCase )[0]
UpperCAmelCase_ = image[0, -3:, -3:, -1]
UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = "google/ncsnpp-celebahq-256"
UpperCAmelCase_ = UNetaDModel.from_pretrained(_UpperCAmelCase )
UpperCAmelCase_ = KarrasVeScheduler()
UpperCAmelCase_ = KarrasVePipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = pipe(num_inference_steps=20 , generator=_UpperCAmelCase , output_type="numpy" ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase_ = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 82 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(k ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
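# The pairs above map the original Detectron2-style MaskFormer checkpoint keys
# onto the Hugging Face module tree; fused attention q/k/v tensors are handled
# separately by the read-in helpers further down.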
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def prepare_img ( ):
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
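# Example invocation (the script filename and local paths below are hypothetical
# placeholders, shown only to illustrate the argparse interface defined above):
# python convert_maskformer_checkpoint.py --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#     --pytorch_dump_folder_path ./maskformer-dump --push_to_hub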
| 14 | 0 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def snake_case_ ( A_ : int ):
'''simple docstring'''
_lowerCamelCase : List[str] = prime_factors(A_ )
if is_square_free(A_ ):
return -1 if len(A_ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
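    # Hedged sanity checks against the standard Möbius values: mu(1)=1 (empty
    # factorization), mu(4)=0 (not square-free), mu(6)=1 (two primes), mu(7)=-1.
    print(snake_case_(1 ) , snake_case_(4 ) , snake_case_(6 ) , snake_case_(7 ) )  # expected: 1 0 1 -1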
| 83 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
    def setUp ( self ) -> int:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id ( self ) -> Any:
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab ( self ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 1_0_1_2 )
    def test_vocab_size ( self ) -> Union[str, Any]:
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
    def test_full_tokenizer ( self ) -> str:
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB , keep_accents=True )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer ( self ) -> List[str]:
        return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
    def test_tokenization_base_easy_symbols ( self ) -> Tuple:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenizer_integration ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_neo'''] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_gpt_neo'''] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
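# Usage sketch (assuming this file is the gpt_neo subpackage __init__): a call like
# `from transformers.models.gpt_neo import GPTNeoConfig` resolves through the
# _LazyModule registered above, so the heavy torch/flax modeling modules are only
# imported on first attribute access rather than at package import time.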
| 84 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp ( self ) -> Union[str, Any]:
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts ( self , _a ) -> List[str]:
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
    def test_rust_and_python_full_tokenizers ( self ) -> List[Any]:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 14 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case ( BackboneConfigMixin , PretrainedConfig ):
lowercase_ = 'swin'
lowercase_ = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : str , a_ : Dict=224 , a_ : Optional[int]=4 , a_ : Optional[Any]=3 , a_ : Optional[int]=96 , a_ : List[str]=[2, 2, 6, 2] , a_ : Union[str, Any]=[3, 6, 12, 24] , a_ : int=7 , a_ : List[Any]=4.0 , a_ : Any=True , a_ : str=0.0 , a_ : str=0.0 , a_ : List[str]=0.1 , a_ : Union[str, Any]="gelu" , a_ : List[str]=False , a_ : List[str]=0.02 , a_ : Union[str, Any]=1e-5 , a_ : int=32 , a_ : Optional[Any]=None , a_ : List[Any]=None , **a_ : Dict , )-> Any:
"""simple docstring"""
super().__init__(**a_ )
SCREAMING_SNAKE_CASE__ : Dict = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Dict = embed_dim
SCREAMING_SNAKE_CASE__ : Optional[Any] = depths
SCREAMING_SNAKE_CASE__ : List[Any] = len(a_ )
SCREAMING_SNAKE_CASE__ : str = num_heads
SCREAMING_SNAKE_CASE__ : Any = window_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = mlp_ratio
SCREAMING_SNAKE_CASE__ : Any = qkv_bias
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = drop_path_rate
SCREAMING_SNAKE_CASE__ : Dict = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE__ : Optional[int] = int(embed_dim * 2 ** (len(a_ ) - 1) )
SCREAMING_SNAKE_CASE__ : Tuple = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(a_ ) + 1 )]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = get_aligned_output_features_output_indices(
out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
class snake_case ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation ( self : List[str] ) -> float:
"""simple docstring"""
return 1e-4
| 85 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp ( self ) -> int:
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
    def get_feature_extractor ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
    def tearDown ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default ( self ) -> Dict:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor ( self ) -> Any:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor ( self ) -> int:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        image = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(image , return_tensors='''np''' )
        input_processor = processor(images=image , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        image = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=image )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 0 |
def pancake_sort ( arr ):
    """simple docstring"""
    cur = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
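# Worked example of one outer pass (names as defined above): for [3, 1, 2] the
# maximum 3 sits at index 0, so the first flip leaves [3, 1, 2] and flipping the
# first cur=3 elements yields [2, 1, 3]; the next pass then produces [1, 2, 3].
# The algorithm uses O(n^2) comparisons overall.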
if __name__ == "__main__":
__a :Optional[Any] = input('Enter numbers separated by a comma:\n').strip()
__a :List[Any] = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 86 |
def harmonic_series ( n_term: str ) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(F"""1/{temp + 1}""" if series else '''1''' )
    return series
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 14 | 0 |
import re
def SCREAMING_SNAKE_CASE ( dna: str ) -> str:
    """simple docstring"""
    if len(re.findall('''[ATCG]''' , dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
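# Illustration (standard Watson-Crick pairing): "ATCG" maps to "TAGC", while a
# strand containing any character outside A/T/C/G raises ValueError.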
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup ( params ,i ,prefix ) -> Dict:
    """simple docstring"""
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup ( params ,i ,prefix ,layer_name="attention" ) -> List[str]:
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup ( params ,i ,prefix ,split_mlp_wi=False ) -> Any:
    """simple docstring"""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_1 = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup ( params ,i ,prefix ,layer_name ) -> List[str]:
    """simple docstring"""
    return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch ( variables: dict ,*, num_layers: int ,is_encoder_only: bool ,scalable_attention: bool = False ) -> Any:
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
                old ,i ,'''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(old ,i ,'''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict ( converted_params ,is_encoder_only: bool ) -> Tuple:
    """simple docstring"""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']
    return state_dict
def load_tax_weights_in_ta ( model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention ) -> int:
    """simple docstring"""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(
        variables ,num_layers=config.num_layers ,is_encoder_only=is_encoder_only ,scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted ,is_encoder_only )
    model.load_state_dict(state_dict ,strict=True )
def convert_tax_checkpoint_to_pytorch ( tax_checkpoint_path ,config_file ,pytorch_dump_path ,is_encoder_only: bool = False ,scalable_attention: bool = False ,) -> Optional[Any]:
    """simple docstring"""
    config = MTaConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config )
    else:
        model = UMTaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model ,config ,tax_checkpoint_path ,is_encoder_only ,scalable_attention )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
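# Example invocation (the script filename and paths below are hypothetical
# placeholders illustrating the flags defined above):
# python convert_umt5_checkpoint.py --t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --config_file /path/to/config.json --pytorch_dump_path ./pytorch-model \
#     --scalable_attention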
| 14 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path ,config_file ,pytorch_dump_path ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 88 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
a__ = '''Usage of script: script_name <size_of_canvas:int>'''
a__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas ( size: int ) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size )] for j in range(size )]
    return canvas
def seed ( canvas: list[list[bool]] ) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas ):
        for j, _ in enumerate(row ):
            canvas[i][j] = bool(random.getrandbits(1 ) )
def run ( canvas: list[list[bool]] ) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            next_gen_canvas[r][c] = __judge_point(
                pt ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point ( pt: bool ,neighbours: list[list[bool]] ) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig , ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['''w''', '''k'''])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 14 | 0 |
from __future__ import annotations
from math import pi
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
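# Quick sketch of the three solve modes above (values are illustrative; each
# branch just isolates one unknown in X_L = 2*pi*f*L):
#   UpperCamelCase_(0, 10_000, 50)  -> {"inductance": 50 / (2 * pi * 10_000)}
#   UpperCamelCase_(0.035, 0, 50)   -> {"frequency": 50 / (2 * pi * 0.035)}
#   UpperCamelCase_(0.035, 1_000, 0) -> {"reactance": 2 * pi * 1_000 * 0.035}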
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class UpperCAmelCase_ ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "funnel"
UpperCAmelCase__ : Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , _a=3_0_5_2_2 , _a=[4, 4, 4] , _a=None , _a=2 , _a=7_6_8 , _a=1_2 , _a=6_4 , _a=3_0_7_2 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1e-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ) -> List[Any]:
_a : Optional[int] = vocab_size
_a : Dict = block_sizes
_a : Optional[int] = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
_a : int = num_decoder_layers
_a : List[str] = d_model
_a : Optional[Any] = n_head
_a : Tuple = d_head
_a : Dict = d_inner
_a : List[str] = hidden_act
_a : int = hidden_dropout
_a : Union[str, Any] = attention_dropout
_a : Tuple = activation_dropout
_a : Optional[Any] = initializer_range
_a : Dict = initializer_std
_a : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
_a : Any = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
_a : Optional[Any] = attention_type
_a : int = separate_cls
_a : Tuple = truncate_seq
_a : List[Any] = pool_q_only
super().__init__(**_a )
    @property
    def num_hidden_layers ( self ) -> Tuple:
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers ( self , _a ) -> List[str]:
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )

    @property
    def num_blocks ( self ) -> Optional[int]:
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks ( self , _a ) -> Dict:
        raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 14 | 0 |
'''simple docstring'''
def binomial_coefficient ( n: int , r: int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
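# The in-place row update above is Pascal's rule C(i, j) = C(i-1, j) + C(i-1, j-1),
# applied right-to-left so each entry still holds the previous row's value when it
# is read; for the call below, binomial_coefficient(n=10, r=5) prints 252.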
print(binomial_coefficient(n=10, r=5))
| 90 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class UpperCAmelCase_ ( PretrainedConfig ):
"""simple docstring"""
UpperCAmelCase__ : int = "mobilenet_v1"
def __init__( self , _a=3 , _a=2_2_4 , _a=1.0 , _a=8 , _a="relu6" , _a=True , _a=0.999 , _a=0.02 , _a=0.001 , **_a , ) -> List[Any]:
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_a : Tuple = num_channels
_a : str = image_size
_a : Tuple = depth_multiplier
_a : Any = min_depth
_a : int = hidden_act
_a : Optional[Any] = tf_padding
_a : str = classifier_dropout_prob
_a : Optional[int] = initializer_range
_a : Any = layer_norm_eps
class UpperCAmelCase_ ( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
    def outputs ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
    def atol_for_validation ( self ) -> float:
return 1e-4
| 14 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlm-roberta-base''': 5_12,
'''xlm-roberta-large''': 5_12,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_12,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_12,
'''xlm-roberta-large-finetuned-conll03-english''': 5_12,
'''xlm-roberta-large-finetuned-conll03-german''': 5_12,
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None:
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(A_ ) )
A = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A = 1
A = len(self.sp_model ) + self.fairseq_offset
A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : Union[str, Any] ) -> Any:
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : str ,A_ : str ) -> Optional[Any]:
        self.__dict__ = A_
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(A_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
        out_string = ''.join(A_ ).replace(SPIECE_UNDERLINE ,' ' ).strip()
return out_string
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,save_directory : str ,filename_prefix : Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 91 |
ERROR_MSG = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id ( spanish_id: str ) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id ,str ):
        msg = F"""Expected string as input, found {type(spanish_id ).__name__}"""
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' ,'''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(ERROR_MSG )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG ) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG )
    return letter == LOOKUP_LETTERS[number % 23]
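# Checksum illustration: the valid letter is LOOKUP_LETTERS[number % 23]; for the
# digits 12345678, 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# "12345678Z" validates while "12345678T" does not.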
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def get_config ( model_name ) -> List[Any]:
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key ( name ) -> List[str]:
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def prepare_img ( ) -> Tuple:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint ( model_name , pytorch_dump_folder_path , push_to_hub=False ) -> Tuple:
    config = get_config(model_name )
    # load original model from timm
    timm_model = create_model(model_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # create image processor
    transform = create_transform(**resolve_data_config({} , model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=True , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=True , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image , return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print('''Logits:''' , logits[0, :3] )
    print('''Predicted class:''' , model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""resnetv2_50x1_bitm""",
type=str,
help="""Name of the BiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model to the hub.""",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 92 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a ,start ,end ) -> int:
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start ,end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a ,start ,end )
        count += _in_place_quick_sort(a ,start ,p - 1 )
        count += _in_place_quick_sort(a ,p + 1 ,end )
    return count
def _in_place_partition ( a ,start ,end ) -> Dict:
    """simple docstring"""
    count = 0
    pivot = randint(start ,end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start ,end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
| 14 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( SequenceFeatureExtractor ):
    """simple docstring"""

    model_input_names = ["""input_features"""]
def __init__( self , __UpperCAmelCase=8_0 , __UpperCAmelCase=1_6_0_0_0 , __UpperCAmelCase=1_6_0 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Any = n_fft
lowerCAmelCase__ :List[Any] = hop_length
lowerCAmelCase__ :Tuple = chunk_length
lowerCAmelCase__ :Tuple = chunk_length * sampling_rate
lowerCAmelCase__ :Tuple = self.n_samples // hop_length
lowerCAmelCase__ :List[Any] = sampling_rate
lowerCAmelCase__ :Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=__UpperCAmelCase , norm='slaney' , mel_scale='slaney' , )
    def _np_extract_fbank_features ( self , waveform ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
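    # The two lines before the return mirror Whisper's reference preprocessing:
    # clip the log10 mel spectrogram to within 8 dB of its maximum, then shift and
    # scale it with (x + 4) / 4 so typical values land roughly in [-1, 1].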
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm ( input_values , attention_mask , padding_value = 0.0 ):
        '''simple docstring'''
        if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
            normed_input_values = []
            for vector, length in zip(input_values , attention_mask.sum(-1 ) ):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice )
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def __call__( self , raw_speech , truncation = True , pad_to_multiple_of = None , return_tensors = None , return_attention_mask = None , padding = "max_length" , max_length = None , sampling_rate = None , do_normalize = None , **kwargs , ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        batched_speech = BatchFeature({'input_features': raw_speech} )
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs['input_features'] = self.zero_mean_unit_var_norm(
                padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
            padded_inputs['input_features'] = np.stack(padded_inputs['input_features'] , axis=0 )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
        input_features = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
        if isinstance(input_features[0] , List ):
            padded_inputs['input_features'] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        else:
            padded_inputs['input_features'] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs['attention_mask'] = padded_inputs['attention_mask'][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ :List[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 93 |
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)
                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
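# Hedged round-trip sketch outside the test harness; "alibaba-damo/mgp-str-base"
# is the public MGP-STR checkpoint and is assumed to be reachable.
if __name__ == "__main__":
    tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    ids = tokenizer("tester")["input_ids"]
    print(ids, tokenizer.decode(ids))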
| 14 | 0 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
super().setUp()
lowercase : Optional[int] =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
lowercase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def A__ ( self : List[str] , UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
lowercase : Optional[int] ='''こんにちは、世界。 \nこんばんは、世界。'''
lowercase : Dict ='''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def A__ ( self : Tuple , UpperCAmelCase : List[str] ) -> int:
'''simple docstring'''
lowercase , lowercase : List[str] =self.get_input_output_texts(UpperCAmelCase )
lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowercase : Optional[Any] =tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
return text, ids
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.tokenizer_class(self.vocab_file )
lowercase : Dict =tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowercase : Optional[int] =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' )
self.assertIsNotNone(UpperCAmelCase )
lowercase : Dict ='''こんにちは、世界。\nこんばんは、世界。'''
lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as handle:
lowercase : Tuple =pickle.load(UpperCAmelCase )
lowercase : Dict =tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : int =MecabTokenizer(mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A__ ( self : List[str] ) -> Any:
'''simple docstring'''
try:
lowercase : Tuple =MecabTokenizer(mecab_dic='''unidic_lite''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
try:
lowercase : Optional[int] =MecabTokenizer(mecab_dic='''unidic''' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A__ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowercase : Tuple =MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A__ ( self : str ) -> str:
'''simple docstring'''
try:
lowercase : Any =MecabTokenizer(
do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowercase : List[str] =MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic='''ipadic''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' )
self.assertIsNotNone(UpperCAmelCase )
lowercase : Optional[int] ='''こんにちは、世界。\nこんばんは、世界。'''
lowercase : Any =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Union[str, Any] =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as handle:
lowercase : Tuple =pickle.load(UpperCAmelCase )
lowercase : Tuple =tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_sudachi
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
lowercase : str =SudachiTokenizer(sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] )
@require_sudachi
def A__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] )
@require_sudachi
def A__ ( self : Optional[int] ) -> str:
'''simple docstring'''
lowercase : Optional[Any] =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' )
self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] )
@require_sudachi
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : Optional[int] =SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : int =SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='''core''' )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' )
self.assertIsNotNone(UpperCAmelCase )
lowercase : Dict ='''こんにちは、世界。\nこんばんは、世界。'''
lowercase : List[Any] =tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Any =os.path.join(self.tmpdirname , '''tokenizer.bin''' )
with open(UpperCAmelCase , '''wb''' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , '''rb''' ) as handle:
lowercase : Union[str, Any] =pickle.load(UpperCAmelCase )
lowercase : str =tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_jumanpp
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Any =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : int ) -> int:
'''simple docstring'''
lowercase : Any =JumanppTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] =JumanppTokenizer(normalize_text=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Any =JumanppTokenizer(trim_whitespace=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def A__ ( self : List[str] ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : int =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
lowercase : Optional[Any] ={}
for i, token in enumerate(UpperCAmelCase ):
lowercase : str =i
lowercase : int =WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] )
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' )
lowercase : Tuple =tokenizer.subword_tokenizer
lowercase : Union[str, Any] =subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' )
self.assertListEqual(UpperCAmelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] )
lowercase : Optional[Any] =subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' )
self.assertListEqual(UpperCAmelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] )
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' )
lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase )
lowercase : Tuple =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase )
lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowercase : Tuple =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowercase : Any =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def A__ ( self : Dict , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase )
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : int ='''こんにちは、世界。 \nこんばんは、世界。'''
lowercase : Optional[int] ='''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def A__ ( self : Dict ) -> int:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : str ) -> str:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
pass # TODO add if relevant
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowercase : Dict =self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' )
lowercase : List[str] =tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' )
self.assertListEqual(
UpperCAmelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
lowercase : List[Any] ={}
for i, token in enumerate(UpperCAmelCase ):
lowercase : Tuple =i
lowercase : Dict =CharacterTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] )
self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] )
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' )
lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase )
lowercase : List[str] =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase )
lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowercase : List[str] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[int] ='''cl-tohoku/bert-base-japanese'''
lowercase : Optional[Any] =AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str ='''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
lowercase : str ='''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.''' ) )
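# Hedged sketch of the word/subword split the tests above assert on; running it
# requires the MeCab backend (fugashi + ipadic) that word_tokenizer_type="mecab"
# depends on.
if __name__ == "__main__":
    tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
    print(tokenizer.tokenize("こんにちは、世界。"))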
| 94 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_processor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
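# Hedged sketch of the save/reload cycle exercised above, using only the public
# AutoImageProcessor API already imported in this module:
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_dir:
        CLIPImageProcessor().save_pretrained(tmp_dir)
        reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
        print(type(reloaded).__name__)  # CLIPImageProcessor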
| 14 | 0 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
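# Example invocation (output directory is illustrative); run as a module from
# the installed transformers package so the relative imports resolve:
#   python -m transformers.convert_slow_tokenizers_checkpoints_to_fast \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers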
| 95 |
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at `root` is a valid binary search tree."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
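# Worked example: a well-ordered tree passes, a mis-ordered one fails.
if __name__ == "__main__":
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))))  # True
    print(is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0))))  # False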
| 14 | 0 |
"""simple docstring"""
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: print a topological ordering of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
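# Kahn's algorithm doubles as a cycle detector: when fewer than len(graph)
# vertices ever reach indegree zero, the remaining vertices lie on a cycle.
topological_sort({0: [1], 1: [0]})  # prints "Cycle exists"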
| 96 |
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `steps` rounds of the Koch construction to the vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every edge with the four edges that form the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the snowflake with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
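# Point-count sanity check: every step replaces each edge with four shorter
# edges, so after n steps the curve has 3 * 4**n edges and 3 * 4**n + 1 points
# (3073 points for the five iterations plotted above).
assert len(iterate(INITIAL_VECTORS, 1)) == 3 * 4 + 1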
| 14 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml

logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def _lowercase ( self : Tuple ) -> Dict:
return tf.__version__
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> float:
# initialize GPU on separate process
lowercase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowercase_ = self._prepare_inference_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_speed(_inference )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> float:
lowercase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowercase_ = self._prepare_train_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_speed(_train )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowercase_ = self._prepare_inference_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_memory(_inference )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , SCREAMING_SNAKE_CASE_ )
lowercase_ = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
lowercase_ = self._prepare_train_func(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._measure_memory(_train )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Callable[[], None]:
lowercase_ = self.config_dict[model_name]
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowercase_ = (
hasattr(SCREAMING_SNAKE_CASE_ , '''architectures''' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase_ = __import__('''transformers''' , fromlist=[model_class] )
lowercase_ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = model_cls(SCREAMING_SNAKE_CASE_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowercase_ = TF_MODEL_MAPPING[config.__class__](SCREAMING_SNAKE_CASE_ )
# encoder-decoder has vocab size saved differently
lowercase_ = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE_ , '''vocab_size''' ) else config.encoder.vocab_size
lowercase_ = random_input_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
lowercase_ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Callable[[], None]:
lowercase_ = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
        if self.args.fp16:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
lowercase_ = (
hasattr(SCREAMING_SNAKE_CASE_ , '''architectures''' )
and isinstance(config.architectures , SCREAMING_SNAKE_CASE_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase_ = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase_ = __import__('''transformers''' , fromlist=[model_class] )
lowercase_ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = model_cls(SCREAMING_SNAKE_CASE_ )
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
lowercase_ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](SCREAMING_SNAKE_CASE_ )
# encoder-decoder has vocab size saved differently
lowercase_ = config.vocab_size if hasattr(SCREAMING_SNAKE_CASE_ , '''vocab_size''' ) else config.encoder.vocab_size
lowercase_ = random_input_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowercase_ = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = tf.gradients(SCREAMING_SNAKE_CASE_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowercase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = tf.gradients(SCREAMING_SNAKE_CASE_ , model.trainable_variables )
return gradients
lowercase_ = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Tuple ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(SCREAMING_SNAKE_CASE_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowercase_ = timeit.repeat(
SCREAMING_SNAKE_CASE_ , repeat=self.args.repeat , number=1_0 , )
return min(SCREAMING_SNAKE_CASE_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
lowercase_ = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
                    if not is_py3nvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
lowercase_ = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
lowercase_ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowercase_ = nvml.nvmlDeviceGetMemoryInfo(SCREAMING_SNAKE_CASE_ )
lowercase_ = meminfo.used
lowercase_ = Memory(SCREAMING_SNAKE_CASE_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
lowercase_ = None
else:
lowercase_ = measure_peak_memory_cpu(SCREAMING_SNAKE_CASE_ )
lowercase_ = Memory(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowercase_ = stop_memory_tracing(SCREAMING_SNAKE_CASE_ )
if memory is None:
lowercase_ = summary.total
else:
lowercase_ = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
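# Hedged smoke test for the benchmark class above; the model name and sizes are
# illustrative defaults, and running it needs TensorFlow plus network access.
if __name__ == "__main__":
    benchmark_args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[8]
    )
    print(TensorFlowBenchmark(benchmark_args).run())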
| 97 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __UpperCAmelCase ( __a : Tuple ,__a : Dict ,__a : List[str] ,__a : Optional[Any] ,__a : Tuple ) -> Dict:
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
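# Example invocation (all paths are illustrative placeholders):
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke/pytorch_model.bin \
#       --metadata_path mluke/metadata.json \
#       --entity_vocab_path mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base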
| 14 | 0 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c * x**i) term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
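# Horner's scheme needs one multiplication per coefficient, versus one power
# per term in evaluate_poly; both agree on exactly representable inputs:
assert evaluate_poly((1, 2, 3), 2.0) == horner((1, 2, 3), 2.0) == 17.0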
| 98 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(predictions , references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 14 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCAmelCase ( BaseImageProcessor ):
    """Generic image processor: resize to a shortest edge, center crop, rescale and normalize."""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize: bool = True , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = PILImageResampling.BILINEAR , do_center_crop: bool = True , crop_size: Optional[Dict[str, int]] = None , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Optional[Dict[str, int]] = None , resample: Optional[PILImageResampling] = None , do_center_crop: Optional[bool] = None , crop_size: Optional[Dict[str, int]] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
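# Hedged usage sketch (the checkpoint-free construction below is illustrative,
# not part of the original module; it assumes PIL is available):
# from PIL import Image
# processor = __UpperCAmelCase()
# batch = processor(images=Image.open("cat.png"), return_tensors="np")
# batch["pixel_values"][0].shape  # (3, 224, 224): resized, cropped, normalized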
| 99 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes , sampling_rate: int ) -> np.array:
    """Decode an audio byte payload through an ffmpeg pipe into a mono float32 waveform."""
    ar = F"""{sampling_rate}"""
    ac = '''1'''
    format_for_conversion = '''f32le'''
    ffmpeg_command = [
        '''ffmpeg''',
        '''-i''',
        '''pipe:0''',
        '''-ac''',
        ac,
        '''-ar''',
        ar,
        '''-f''',
        format_for_conversion,
        '''-hide_banner''',
        '''-loglevel''',
        '''quiet''',
        '''pipe:1''',
    ]
    try:
        with subprocess.Popen(ffmpeg_command ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload )
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes ,np.float32 )
    if audio.shape[0] == 0:
        raise ValueError('''Malformed soundfile''' )
    return audio
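# Hedged usage sketch (file name is illustrative): any container/codec that
# ffmpeg understands can be decoded to the requested rate.
# with open("sample.mp3", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16000)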
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int ,chunk_length_s: float ,stream_chunk_s: Optional[int] = None ,stride_length_s: Optional[Union[Tuple[float, float], float]] = None ,format_for_conversion: str = "f32le" ,):
    """Like `ffmpeg_microphone`, but yields dicts with overlapping strides so a
    consumer can see context around each chunk while only committing to its center."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate ,chunk_s ,format_for_conversion=format_for_conversion )
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
    if isinstance(stride_length_s ,(int, float) ):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s )
    for item in chunk_bytes_iter(microphone ,chunk_len ,stride=(stride_left, stride_right) ,stream=True ):
        # Put everything back in numpy scale
        item['''raw'''] = np.frombuffer(item['''raw'''] ,dtype=dtype )
        item['''stride'''] = (
            item['''stride'''][0] // size_of_sample,
            item['''stride'''][1] // size_of_sample,
        )
        item['''sampling_rate'''] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator ,chunk_len: int ,stride: Tuple[int, int] ,stream: bool = False ):
    """Read raw bytes from `iterator` and yield `chunk_len`-byte chunks with `stride`
    overlap on each side; with `stream=True`, also yield partial chunks early."""
    acc = b''''''
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'''raw''': acc[:chunk_len], '''stride''': stride}
                if stream:
                    item['''partial'''] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {'''raw''': acc, '''stride''': (_stride_left, 0)}
        if stream:
            item['''partial'''] = False
        yield item
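# Illustrative trace (made-up numbers, not from the original module): with
# chunk_len=6 and stride=(2, 2), feeding b"abcdefgh" in one shot yields
#     {"raw": b"abcdef", "stride": (0, 2)}   # first chunk has no left stride
#     {"raw": b"cdefgh", "stride": (2, 2)}   # acc advances by chunk_len - 4
#     {"raw": b"efgh", "stride": (2, 0)}     # final flush of the remainder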
def _ffmpeg_stream(ffmpeg_command ,buflen: int ):
    """Internal generator yielding successive `buflen`-byte reads from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command ,stdout=subprocess.PIPE ,bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
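# Hedged usage sketch (requires a working ffmpeg capture device; the consumer
# function below is hypothetical):
# for item in ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0):
#     if not item.get("partial", False):
#         handle_audio(item["raw"], item["sampling_rate"])  # hypothetical consumer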
| 14 | 0 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_A : int = get_tests_dir("""fixtures""")
class FeatureExtractorUtilTester ( unittest.TestCase ):
'''simple docstring'''
    def test_cached_files_are_used_when_internet_is_down( self ):
        '''simple docstring'''
        response_mock = mock.Mock()
        response_mock.status_code = 5_00
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            _ = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
        '''simple docstring'''
        _ = WavaVecaFeatureExtractor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class FeatureExtractorPushToHubTester ( unittest.TestCase ):
'''simple docstring'''
@classmethod
    def setUpClass( cls ):
        '''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
    def test_push_to_hub( self ):
        '''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(_A )
        feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''test-feature-extractor''' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_in_organization( self ):
        '''simple docstring'''
        feature_extractor = WavaVecaFeatureExtractor.from_pretrained(_A )
        feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=True , use_auth_token=self._token )
        new_feature_extractor = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v , getattr(new_feature_extractor , k ) )
    def test_push_to_hub_dynamic_feature_extractor( self ):
        '''simple docstring'''
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(_A )
        feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=True )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 100 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 3_2
    @property
    def time_input_dim( self ):
        return 3_2
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 1_0_0
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) , dtype=np.float32 )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 6_4,
            '''width''': 6_4,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_inpaint( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(F"""image.shape {image.shape}""" )
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        mask = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 14 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)
def builtin_voltage(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction from its donor,
    acceptor and intrinsic carrier concentrations (all in the same units)."""
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive' )
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive' )
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
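# The expression above is the standard junction formula
#     V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2)
# with k_B*T/q ~= 0.0259 V at 300 K. Illustrative silicon-like numbers (not
# from the original module): builtin_voltage(1e17, 1e17, 1.5e10) ~= 0.81 V.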
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def parse_bool(string: str ) -> bool:
    """Parse a "True"/"False" command-line string into a boolean."""
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 0 |
"""simple docstring"""
import math
def fx(x: float , a: float ) -> float:
    return math.pow(x , 2 ) - a
def fx_derivative(x: float ) -> float:
    return 2 * x
def get_initial_point(a: float ) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative(a: float , max_iter: int = 9999 , tolerance: float = 0.00_00_00_00_00_00_01 ) -> float:
    """Approximate sqrt(a) with Newton's method on f(x) = x**2 - a."""
    if a < 0:
        raise ValueError("""math domain error""" )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
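# Newton's update x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x**2 - a reduces
# to the Babylonian average x_{n+1} = (x_n + a/x_n) / 2.
# Illustrative check (not part of the original module):
# square_root_iterative(2) ~= 1.4142135623730951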
if __name__ == "__main__":
from doctest import testmod
testmod()
| 102 |
class Things:
    """A named item with a value and a weight."""
    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ):
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
    def get_value( self ):
        return self.value
    def get_name( self ):
        return self.name
    def get_weight( self ):
        return self.weight
    def value_weight( self ):
        return self.value / self.weight
def build_menu(name , value , weight ):
    """Build a list of Things from parallel name/value/weight sequences."""
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy(item , max_cost , key_func ):
    """Greedily take items in decreasing key_func order while the total weight stays within max_cost."""
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
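# Hedged usage sketch (menu values are made up, not from the original module):
# foods = build_menu(["Burger", "Pizza"], [80.0, 100.0], [40.0, 45.0])
# chosen, total_value = greedy(foods, max_cost=60.0, key_func=Things.get_value)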
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 0 |
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-350m''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-1.6b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-6.7b''': 2_0_4_8,
'''AI-Sweden/gpt-sw3-20b''': 2_0_4_8,
}
class UpperCAmelCase ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' if you are testing the model, this can safely be ignored''' )
            name_or_path = '''None'''
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"""[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(1_1 , 3_2 ) ) + list(range(1_2_7 , 1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]""" )
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size( self ):
        """Returns vocab size."""
        return len(self.sp_model )
    def preprocess_text( self , text: str ) -> str:
        """Strip non-printing characters and apply whitespace/NFC normalization before tokenizing."""
        text = self.non_printing_characters_re.sub('''''' , text )
        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''' , text )
        return text
    def _tokenize( self , text: str , **kwargs ):
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token: str ) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index: int ) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index )
@staticmethod
    def clean_up_tokenization(out_string: str ) -> str:
        """Returns the input string; overridden to skip the default cleanup."""
        return out_string
    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        """Converts a sequence of tokens (strings) into a single string; special tokens stay intact."""
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def encode_fast( self , text: Union[str, List[str]] , return_tensors: Union[str, bool] = False ):
        """Encode text (or a batch of texts) straight through the SP model, optionally as a torch tensor."""
        if isinstance(text , str ):
            text = self.preprocess_text(text )
            token_ids = self.sp_model.encode(text )
        else:
            text = [self.preprocess_text(t ) for t in text]
            token_ids = self.sp_model.encode(text )
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids )
        return token_ids
    def decode_fast( self , token_ids: Union[int, List[int]] ) -> str:
        """Decode ids straight through the SP model, skipping the slower added-tokens path."""
        return self.sp_model.decode(token_ids )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : "Conversation" ):
"""simple docstring"""
_snake_case = [f"""User: {text}""" if is_user else f"""Bot: {text}""" for is_user, text in conversation.iter_texts()]
_snake_case = (
f"""{self.eos_token}{self.bos_token}""" + f"""{self.bos_token}""".join(__lowerCamelCase ) + f"""{self.bos_token}Bot:"""
)
return self.encode(text=__lowerCamelCase )
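# Hedged usage sketch (checkpoint name is illustrative and gated in practice):
# tok = UpperCAmelCase.from_pretrained("AI-Sweden/gpt-sw3-126m")
# ids = tok.encode_fast("Träd är fina")
# tok.decode_fast(ids)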
| 103 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=2_2_4 , num_labels=1_0_0_0 , layer_depths=[3, 3, 6, 4] , embed_dims=[4_8, 5_6, 1_1_2, 2_2_0] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1e-5 , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''SwiftFormer does not output attentions''' )
    def test_attention_outputs( self ):
        pass
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states ) , expected_num_stages )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ):
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1e-1_0 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def prepare_img():
    """Load the COCO test image used by the integration test below."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
        return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 14 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig( PretrainedConfig ):
    """Configuration for the OwlViT text encoder."""
    model_type = "owlvit_text_model"
    def __init__( self , vocab_size=49408 , hidden_size=512 , intermediate_size=2048 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=16 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , pad_token_id=0 , bos_token_id=49406 , eos_token_id=49407 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    """Configuration for the OwlViT vision encoder."""
    model_type = "owlvit_vision_model"
    def __init__( self , hidden_size=768 , intermediate_size=3072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=768 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=0.0_2 , initializer_factor=1.0 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class OwlViTConfig( PretrainedConfig ):
    """Aggregate configuration holding the OwlViT text and vision sub-configs."""
    model_type = "owlvit"
    is_composition = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6_5_9_2 , return_dict=True , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
    @classmethod
    def from_text_vision_configs( cls , text_config: Dict , vision_config: Dict , **kwargs ):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
    """ONNX export configuration for OwlViT."""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1e-4
    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , framework: Optional["TensorType"] = None , ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
@property
    def default_onnx_opset( self ) -> int:
return 14
| 104 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def __UpperCAmelCase ( model_name : str ) -> MaskFormerConfig:
    """Build a MaskFormerConfig (Swin-tiny backbone) with label counts inferred from the model name."""
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    return config
def __UpperCAmelCase ( config : MaskFormerConfig ) -> list:
    """Build the original-to-HF state-dict key renaming table."""
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key( dct ,old ,new ) -> Union[str, Any]:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
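# Illustrative usage of rename_key (toy dict; the real keys come from
# create_rename_keys above):
#     demo = {"backbone.patch_embed.norm.weight": 1.0}
#     rename_key(demo, "backbone.patch_embed.norm.weight",
#                "model.pixel_level_module.encoder.model.embeddings.norm.weight")
#     # the tensor now lives under the new key only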
def read_in_swin_q_k_v( state_dict ,backbone_config ) -> Union[str, Any]:
    """simple docstring"""
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            in_proj_bias = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim :, :]
            state_dict[F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
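# Editor's sketch (not in the original script): the slicing pattern used above on
# a toy fused projection, assuming dim = 2. The first dim rows are the query, the
# next dim rows the key, and the last dim rows the value.
def _demo_split_fused_qkv():
    fused_weight = torch.arange(24 ).reshape(6 ,4 )  # shape (3 * dim, hidden) with dim = 2
    dim = 2
    q = fused_weight[:dim, :]
    k = fused_weight[dim : dim * 2, :]
    v = fused_weight[-dim:, :]
    return q, k, v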
def read_in_decoder_q_k_v( state_dict ,config ) -> List[Any]:
    """simple docstring"""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"""] = in_proj_weight[: hidden_size, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"""] = in_proj_bias[:hidden_size]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"""] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"""] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"""] = in_proj_weight[-hidden_size :, :]
        state_dict[F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"""] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img() -> torch.Tensor:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name: str ,checkpoint_path: str ,pytorch_dump_folder_path: str ,push_to_hub: bool = False ) -> Union[str, Any]:
    """simple docstring"""
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path ,'''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_swin_q_k_v(state_dict ,config.backbone_config )
    read_in_decoder_q_k_v(state_dict ,config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name ,param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict ,strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, F"""Unexpected keys: {unexpected_keys}"""
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index ,reduce_labels=reduce_labels )
    inputs = image_processor(image ,return_tensors='''pt''' )
    outputs = model(**inputs )
    print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,expected_logits ,atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('''Pushing model and image processor to the hub...''' )
        model.push_to_hub(F"""nielsr/{model_name}""" )
        image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
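# Example invocation (editor's note; paths are placeholders, not from the script):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade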
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encoder_decoder'''] = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_encoder_decoder'''] = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_encoder_decoder'''] = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
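# Editor's sketch of the pattern above (toy illustration, not the real
# _LazyModule): a lazy module records name -> submodule mappings and imports the
# submodule only on first attribute access, so importing the package stays cheap.
import importlib

class _ToyLazyModule:
    def __init__(self, package: str, import_structure: dict):
        # invert {"submodule": ["Name", ...]} into {"Name": "submodule"}
        self._package = package
        self._attr_to_submodule = {name: sub for sub, names in import_structure.items() for name in names}

    def __getattr__(self, name: str):
        submodule = importlib.import_module(f"{self._package}.{self._attr_to_submodule[name]}")
        return getattr(submodule, name)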
| 105 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(a__ , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
        token = '''[PAD]'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowercase ( self ) -> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''[PAD]''' )
        self.assertEqual(vocab_keys[1] , '''[CLS]''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(vocab_keys ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
        tokenizer = XLMProphetNetTokenizer(a__ , keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
    def big_tokenizer ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
        symbols = '''Hello World!'''
        original_tokenizer_encodings = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
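# Editor's note (illustrative): the `fairseq_offset` checked above shifts raw
# SentencePiece ids by a constant so the first positions stay reserved for the
# control tokens ([PAD], [CLS], ...). With an assumed offset of 1:
#     sp_ids = [285, 46, 10, 170, 382]
#     hf_ids = [i + 1 for i in sp_ids]   # -> [286, 47, 11, 171, 383]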
| 14 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
def __lowercase ( self ) -> List[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 1_0, 8, 9] )
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
| 14 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
"""simple docstring"""
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0, ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs( self : Any ) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> int:
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> int:
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ) -> Any:
        model = TFDPRReader(config=config )
        result = model(input_ids, attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,) )
    def prepare_config_and_inputs_for_common( self : Dict ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self : Optional[int] ) -> Union[str, Any]:
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37 )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
    def __UpperCAmelCase ( self : List[str] ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def __UpperCAmelCase ( self : int ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def __UpperCAmelCase ( self : int ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
        input_ids = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4 ) )
| 107 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
def __lowercase ( self ) -> Any:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        image = np.ones([3, 2_2_4, 2_2_4] )
        image_dict = image_processor(image , return_tensors='''np''' )
        input_processor = processor(images=image , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2_0_0_0] )
        image = np.ones([3, 2_2_4, 2_2_4] )
        inputs = processor(audio=audio , images=image )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
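# Editor's sketch (assumed behaviour, mirroring the assertions above): a combined
# audio/vision processor routes each modality to its sub-processor and merges the
# resulting feature dicts; calling it with neither input raises an error.
def _demo_route(image_processor, feature_extractor, audio=None, images=None):
    outputs = {}
    if audio is not None:
        outputs.update(feature_extractor(audio, return_tensors="np"))
    if images is not None:
        outputs.update(image_processor(images, return_tensors="np"))
    if not outputs:
        raise ValueError("You need to specify either an `audio` or `images` input.")
    return outputs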
| 14 | 0 |
import re
from filelock import FileLock
try:
import nltk
__a: Any = True
except (ImportError, ModuleNotFoundError):
__a: Optional[int] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> str:
re.sub("""<n>""" , """""" , __snake_case ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__snake_case ) ) | 108 |
def harmonic_series( n_term: str ) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(F"""1/{temp + 1}""" if series else '''1''' )
    return series

if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(harmonic_series(nth_term))
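# Worked example: harmonic_series("4") -> ['1', '1/2', '1/3', '1/4'],
# i.e. the terms of 1 + 1/2 + 1/3 + 1/4.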
| 14 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : Optional[Any] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=8 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self : int ,device ,seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase )
__SCREAMING_SNAKE_CASE = """french fries"""
__SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase ,negative_prompt=lowerCamelCase )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase )
__SCREAMING_SNAKE_CASE = [inputs["""prompt"""]] * 2
__SCREAMING_SNAKE_CASE = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
__SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = image / 2 + 0.5
__SCREAMING_SNAKE_CASE = image.permute(0 ,3 ,1 ,2 )
__SCREAMING_SNAKE_CASE = image.repeat(2 ,1 ,1 ,1 )
__SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.get_dummy_components()
__SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" )
__SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase )
__SCREAMING_SNAKE_CASE = sd_pipe(**lowerCamelCase ).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = [round(lowerCamelCase ,4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(lowerCamelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPixaPixPipeline(**components )
        pipe.image_processor = VaeImageProcessor(do_resize=False ,do_normalize=False )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        out = pipe(**self.get_dummy_inputs_by_type(torch_device ,input_image_type="""pt""" ) )[0]
        vae = components["""vae"""]
        inputs = self.get_dummy_inputs_by_type(torch_device ,input_image_type="""pt""" )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param] ).latent_dist.mode()
        out_latents_inputs = pipe(**inputs )[0]
        max_diff = np.abs(out - out_latents_inputs ).max()
        self.assertLess(max_diff ,1E-4 ,"""passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests( unittest.TestCase ):
    def tearDown( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self : int ,seed : int=0 ):
        '''simple docstring'''
        generator = torch.manual_seed(seed )
        image = load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
        inputs = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" ,safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" ,safety_checker=None )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" ,safety_checker=None )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
        number_of_steps = 0
        def callback_fn(step: int ,timestep: int ,latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        callback_fn.has_been_called = False
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" ,safety_checker=None ,torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs ,callback=callback_fn ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" ,safety_checker=None ,torch_dtype=torch.floataa )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["""image"""] = inputs["""image"""].resize((504, 504) )
        model_id = """timbrooks/instruct-pix2pix"""
        pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            model_id ,safety_checker=None ,)
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        output = pipe(**inputs )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
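# Editor's sketch (illustrative, not part of the test file): the callback protocol
# exercised above. The pipeline calls callback(step, timestep, latents) every
# `callback_steps` denoising steps, which is how the test counts steps and probes
# intermediate latents.
def _make_counting_callback():
    state = {"calls": 0}
    def _cb(step, timestep, latents):
        state["calls"] += 1
    return state, _cb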
| 109 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup( params ,i ,prefix ) -> Dict:
    """simple docstring"""
    return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def tax_attention_lookup( params ,i ,prefix ,layer_name="attention" ) -> List[str]:
    """simple docstring"""
    k_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] ,k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] ,o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] ,q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] ,v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def tax_mlp_lookup( params ,i ,prefix ,split_mlp_wi=False ) -> Any:
    """simple docstring"""
    if split_mlp_wi:
        wi_a = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
        wi_b = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
    wo = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
    return wi, wo
def tax_layer_norm_lookup( params ,i ,prefix ,layer_name ) -> List[str]:
    """simple docstring"""
    return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def convert_tax_to_pytorch( variables: dict ,*, num_layers: int ,is_encoder_only: bool ,scalable_attention: bool = False ) -> Any:
    """simple docstring"""
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' ,split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_attention_layer_norm''' )
        k , o , q , v = tax_attention_lookup(old ,i ,'''encoder''' ,'''attention''' )
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old ,i ,'''encoder''' ,'''pre_mlp_layer_norm''' )
        wi , wo = tax_mlp_lookup(old ,i ,'''encoder''' ,split_mlp_wi )
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"""encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(
                old ,i ,'''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = tax_relpos_bias_lookup(
            old ,0 ,'''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_self_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''self_attention''' )
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_cross_attention_layer_norm''' )
            k , o , q , v = tax_attention_lookup(old ,i ,'''decoder''' ,'''encoder_decoder_attention''' )
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old ,i ,'''decoder''' ,'''pre_mlp_layer_norm''' )
            wi , wo = tax_mlp_lookup(old ,i ,'''decoder''' ,split_mlp_wi )
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"""decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"""] = tax_relpos_bias_lookup(old ,i ,'''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in the model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise the PyTorch model.
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from the T5X checkpoint.
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model.
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
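    # A minimal programmatic invocation sketch; all paths below are hypothetical
    # placeholders, not real checkpoints:
    #
    #     convert_tax_checkpoint_to_pytorch(
    #         tax_checkpoint_path="/tmp/t5x/checkpoint_1000000",
    #         config_file="/tmp/umt5-small/config.json",
    #         pytorch_dump_path="/tmp/umt5-small-pt",
    #         is_encoder_only=False,
    #         scalable_attention=True,  # UMT5 uses per-layer relative attention biases
    #     )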
| 14 | 0 |
"""simple docstring"""
from collections import defaultdict
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(_snake_case )
if ret % 2 == 0:
cuts.append(_snake_case )
return ret
def lowerCamelCase ( ):
dfs(1 )
if __name__ == "__main__":
UpperCamelCase__ , UpperCamelCase__ = 10, 9
UpperCamelCase__ = defaultdict(list)
UpperCamelCase__ = {}
UpperCamelCase__ = []
UpperCamelCase__ = 0
UpperCamelCase__ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
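    # Worked example for the sample tree above: dfs(1) returns 10 (the whole
    # tree); the subtrees rooted at 3 (size 2) and 6 (size 4) are even, so
    # `cuts` ends up as [3, 6, 1]. The root is always appended for an
    # even-sized tree, hence the `- 1`; the script prints 2.
    assert sorted(cuts) == [1, 3, 6]  # sanity check for the hard-coded input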
| 110 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas by one generation of Conway's Game of Life."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
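

def demo_headless(size: int = 5, generations: int = 3) -> list[list[bool]]:
    """Minimal headless sketch (an illustrative addition): advance a freshly
    seeded canvas a few generations without any matplotlib output."""
    board = create_canvas(size)
    seed(board)
    for _ in range(generations):
        board = run(board)
    return board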
| 14 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
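

# Minimal sketch of driving the reader outside pytest (assumes "lines.txt" is
# a local plain-text file with one example per line; the paths are placeholders):
#
#     ds = TextDatasetReader("lines.txt", cache_dir="/tmp/text_cache").read()
#     print(ds.column_names)  # -> ["text"]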
| 55 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
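

# Minimal usage sketch: `num_hidden_layers` sums the block sizes and
# `num_blocks` counts them, so three blocks of four layers each give
# 12 hidden layers across 3 blocks.
#
#     config = FunnelConfig(block_sizes=[4, 4, 4])
#     assert config.num_hidden_layers == 12
#     assert config.num_blocks == 3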
| 14 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
_A = 1
_A = 3
_A = (32, 32)
_A = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
    def dummy_cond_unet(self):
torch.manual_seed(0 )
_A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
torch.manual_seed(0 )
_A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
torch.manual_seed(0 )
_A = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(_a )
@property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()
return extract
    def test_stable_diffusion_imgaimg_default_case(self):
_A = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=_a )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_A = 77
_A = self.dummy_image.to(_a )
_A = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
_A = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_A = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_A = '''A painting of a squirrel eating a burger'''
_A = torch.Generator(device=_a ).manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_a , )
_A = output.images
_A = torch.Generator(device=_a ).manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_a , return_dict=_a , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_imgaimg_fp16(self):
_A = self.dummy_cond_unet
_A = PNDMScheduler(skip_prk_steps=_a )
_A = self.dummy_vae
_A = self.dummy_text_encoder
_A = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_A = 77
_A = self.dummy_image.to(_a )
# put models in fp16
_A = unet.half()
_A = vae.half()
_A = bert.half()
# make sure here that pndm scheduler skips prk
_A = AltDiffusionImgaImgPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
_A = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_a )
_A = alt_pipe.to(_a )
alt_pipe.set_progress_bar_config(disable=_a )
_A = '''A painting of a squirrel eating a burger'''
_A = torch.manual_seed(0 )
_A = alt_pipe(
[prompt] , generator=_a , num_inference_steps=2 , output_type='np' , image=_a , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_imgaimg_pipeline_multiple_of_8(self):
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
_A = init_image.resize((760, 504) )
_A = '''BAAI/AltDiffusion'''
_A = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A = '''A fantasy landscape, trending on artstation'''
_A = torch.manual_seed(0 )
_A = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='np' , )
_A = output.images[0]
_A = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
_A = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_imgaimg_pipeline_default(self):
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
_A = init_image.resize((768, 512) )
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
_A = '''BAAI/AltDiffusion'''
_A = AltDiffusionImgaImgPipeline.from_pretrained(
_a , safety_checker=_a , )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
_A = '''A fantasy landscape, trending on artstation'''
_A = torch.manual_seed(0 )
_A = pipe(
prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , generator=_a , output_type='np' , )
_A = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 27 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetVaOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
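

# Minimal usage sketch: a narrower variant of the default configuration
# (the override values below are illustrative):
#
#     config = MobileNetVaConfig(depth_multiplier=0.75, image_size=192)
#     assert config.depth_multiplier == 0.75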
| 14 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 24 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits followed by a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
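
    # Worked example: for the number 12345678, 12345678 % 23 == 14 and
    # LOOKUP_LETTERS[14] == "Z", so "12345678Z" (or "12345678-Z") validates.
    assert is_spain_national_id("12345678Z")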
| 14 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 290 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Randomized in-place quicksort; returns the number of comparisons made."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; returns (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
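
# Note: for n = 100 elements the expected comparison count of randomized
# quicksort is O(n log n), i.e. on the order of a few hundred here; `z`
# varies from run to run because the pivots are drawn at random.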
| 14 | 0 |
"""Sudoku solver using simple recursive backtracking."""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` is not already used in the row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the next empty cell, scanning left-to-right, top-to-bottom."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Recursive backtracking: try each safe digit in the next empty cell."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 71 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
| 14 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Optional[int] = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
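

# Minimal composition sketch: build a full OwlViTConfig from sub-config dicts
# (the override values below are illustrative):
#
#     config = OwlViTConfig.from_text_vision_configs(
#         text_config={"hidden_size": 512},
#         vision_config={"patch_size": 32},
#     )
#     assert config.projection_dim == 512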
| 602 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_key(self):
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_directory_from_config(self):
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
    def test_image_processor_from_local_file(self):
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
    def test_repo_not_found(self):
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
    def test_revision_not_found(self):
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
    def test_image_processor_not_found(self):
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def test_from_pretrained_dynamic_image_processor(self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def test_new_image_processor_registration(self):
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CustomImageProcessor):
            is_local = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 0 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """Constructs a MarkupLM feature extractor: extracts nodes and the
    corresponding xpaths from HTML strings."""

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 661 |
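To make the xpath construction above concrete: for <html><body><div>a</div><div>b</div></body></html>, the text node "b" walks up to tags ["html", "body", "div"] with subscripts [0, 0, 2] (it sits in the second of two sibling divs), and construct_xpath joins these into "/html/body/div[2]". A standalone re-computation of that joining step:

xpath_tags = ["html", "body", "div"]
xpath_subscripts = [0, 0, 2]
xpath = ""
for tagname, subs in zip(xpath_tags, xpath_subscripts):
    xpath += f"/{tagname}"
    if subs != 0:
        xpath += f"[{subs}]"
assert xpath == "/html/body/div[2]"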
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """
    Check whether a binary tree is a binary search tree: every node's data must be
    a float, and every node must be strictly greater than everything in its left
    subtree and strictly smaller than everything in its right subtree.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 14 | 0 |
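A small usage sketch for the validator above (names taken from the module):

#       2.1
#      /   \
#    1.0   3.5
valid = TreeNode(2.1, TreeNode(1.0), TreeNode(3.5))
invalid = TreeNode(2.1, TreeNode(3.0), TreeNode(3.5))  # left child breaks left_bound < data < root

assert is_binary_search_tree(valid)
assert not is_binary_search_tree(invalid)
assert is_binary_search_tree(None)  # an empty tree counts as a valid BST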
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]

        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]

        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepares_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 157 |
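The integration tests above build decoder inputs with shift_tokens_right. A rough, self-contained sketch of the wrap-around shift that mBART/PLBart-style models apply to labels (simplified: it skips the library's handling of -100 label padding, so treat the exact behavior as an assumption rather than a quote of the transformers helper):

import torch


def wrap_around_shift(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # Move each row's last non-pad token (the language code) to position 0
    # and shift everything else right by one.
    prev_output_tokens = input_ids.clone()
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens


labels = torch.tensor([[5, 6, 7, 2, 50003]])  # tokens, EOS=2, EN_CODE=50003
print(wrap_around_shift(labels, pad_token_id=1))  # tensor([[50003, 5, 6, 7, 2]])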
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """
    Go through the number of iterations determined by the argument "steps".
    Be careful with high values (above 5), since the calculation time increases
    exponentially.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """
    Loop through each pair of adjacent vectors. Each line between two adjacent
    vectors is divided into 4 segments by adding 3 additional vectors in-between;
    the middle vector is bent outwards by a 60-degree rotation.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """
    Standard rotation of a 2D vector with a rotation matrix.
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """
    Utility function to plot the vectors using matplotlib.pyplot.
    """
    # avoid stretched display of the graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 14 | 0 |
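A quick sanity check on the growth rate of the construction above: iteration_step replaces every segment with 4 segments, so n steps turn the initial 3 segments into 3 * 4**n segments, i.e. 3 * 4**n + 1 points in the vector list (names taken from the module):

for n in range(4):
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1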
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune the branch if the path already overshoots the target, or if even
    # adding every remaining number could not reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 175 |
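Expected behavior of the backtracking search above, as a small check (names taken from the module): for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the depth-first order yields exactly two subsets.

assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]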
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict[f"luke.{key}"] = state_dict[key]
        else:
            hf_state_dict[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 14 | 0 |
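The conversion script above grows embedding matrices by one row, initializing the new [MASK2] entry from the existing [MASK] row. The pattern in isolation (shapes and the mask index are illustrative, not taken from a real checkpoint):

import torch

entity_emb = torch.randn(5, 8)         # (entity_vocab_size, hidden_size)
mask_row = entity_emb[3].unsqueeze(0)  # row of the existing [MASK] entry
entity_emb = torch.cat([entity_emb, mask_row])
assert entity_emb.shape == (6, 8)      # one extra row for [MASK2]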
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
| 186 |
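A hypothetical test consuming the fixtures above; pytest injects them by parameter name. The fixture names come from the file above, while the test body (HfApi.dataset_info and the .private attribute) is an assumption about the huggingface_hub client:

def test_private_text_repo_is_reachable(hf_private_dataset_repo_txt_data, hf_api, hf_token):
    info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    assert info.private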
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 14 | 0 |
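The -0.7 in the metric's Example 1 can be checked by hand. With no ties, Spearman reduces to rho = 1 - 6 * sum(d_i**2) / (n * (n**2 - 1)), where d_i is the difference between the two rank vectors:

references = [1, 2, 3, 4, 5]      # ranks: 1 2 3 4 5
predictions = [10, 9, 2.5, 6, 4]  # ranks: 5 4 1 3 2
d_squared = [(1 - 5) ** 2, (2 - 4) ** 2, (3 - 1) ** 2, (4 - 3) ** 2, (5 - 2) ** 2]
rho = 1 - 6 * sum(d_squared) / (5 * (5**2 - 1))  # 1 - 204/120 = -0.7
assert abs(rho - (-0.7)) < 1e-12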