| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = """altclip_text_model"""
def __init__( self , lowercase_=25_0002 , lowercase_=1024 , lowercase_=24 , lowercase_=16 , lowercase_=4096 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=514 , lowercase_=1 , lowercase_=0.02 , lowercase_=0.02 , lowercase_=1E-0_5 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_="absolute" , lowercase_=True , lowercase_=768 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : Optional[Any] = hidden_act
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Optional[Any] = type_vocab_size
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : int = initializer_factor
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : int = position_embedding_type
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Union[str, Any] = project_dim
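# Illustrative usage (not part of the original file): the defaults above mirror an
# XLM-R-large-sized text tower, so a config with one overridden field looks like:
#
#     >>> text_config = AltCLIPTextConfig(hidden_dropout_prob=0.2)
#     >>> text_config.project_dim
#     768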
class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of an AltCLIP model."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding a text and a vision sub-configuration."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but differ.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but differ.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiate an `AltCLIPConfig` from a text and a vision sub-configuration."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
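# A minimal composition sketch (illustrative, not in the original module): building a
# full `AltCLIPConfig` from explicitly constructed sub-configs via the classmethod above.
#
#     >>> text_config = AltCLIPTextConfig()
#     >>> vision_config = AltCLIPVisionConfig()
#     >>> config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#     >>> config.to_dict()["model_type"]
#     'altclip'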
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
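# Illustrative sketch (not part of the original file): `_LazyModule` above defers heavy
# imports until an attribute is first touched. A stripped-down version of the same idea,
# with hypothetical names (the real class also handles `dir()`, pickling, and specs):

import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Resolve exported attributes from submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value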
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """Harris corner detector; k is the sensitivity factor (typically 0.04 to 0.06)."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # use the sensitivity factor validated in the constructor
                r = det - self.k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
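# Quick self-contained check (illustrative, not in the original file): for a 2x2
# structure tensor M = [[wxx, wxy], [wxy, wyy]], the response used above is
# R = det(M) - k * trace(M)^2, e.g.:
#
#     >>> wxx, wyy, wxy, k = 4.0, 3.0, 1.0, 0.04
#     >>> round(wxx * wyy - wxy**2 - k * (wxx + wyy) ** 2, 2)
#     9.04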
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
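# Example invocation (illustrative; the model identifiers below are placeholders
# chosen for the sketch, not taken from the original script):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-checkpoint \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base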
def __get_demo_graph(index):
    """Return one of four small undirected demo graphs as an adjacency list."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return all bridges of an undirected graph as (a, b) tuples with a < b."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
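# Illustrative usage (not in the original file): the first demo graph is a triangle
# 0-1-2 with a path 2-3-4 and a cycle 5-6-7-8 hanging off vertex 2, so its bridges
# are exactly the edges that lie on no cycle; with the DFS order above this gives:
#
#     >>> compute_bridges(__get_demo_graph(0))
#     [(3, 4), (2, 3), (2, 5)]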
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
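# Illustrative sketch (not part of the original tests): the helpers exercised above
# dispatch on the tensor's framework. A minimal numpy/torch version of that dispatch
# pattern, with a hypothetical name, looks like:

import numpy as _np


def my_transpose(array, axes=None):
    """Transpose a numpy array or a torch tensor through one entry point."""
    if isinstance(array, _np.ndarray):
        return _np.transpose(array, axes=axes)
    try:
        import torch as _torch

        if isinstance(array, _torch.Tensor):
            return array.T if axes is None else array.permute(*axes)
    except ImportError:
        pass
    raise ValueError(f"Type not supported for transpose: {type(array)}.")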
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
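# Illustrative sketch (not from the original file): the `shift_tokens_right` helper
# exercised above prepends the decoder start token and shifts everything one position
# right. A minimal numpy version under those assumptions, with a hypothetical name:

import numpy as _np


def shift_tokens_right_np(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one position to the right and prepend the decoder start token."""
    shifted = _np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # any -100 label placeholders are mapped back to the pad id
    shifted = _np.where(shifted == -100, pad_token_id, shifted)
    return shifted


# For example, shift_tokens_right_np(_np.array([[5, 6, 1]]), pad_token_id=1,
# decoder_start_token_id=2) returns [[2, 5, 6]].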
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a reversible byte -> printable unicode character mapping used by byte-level BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
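# Worked example (illustrative, not in the original file): one step of the BPE loop
# above on the word "hugs", assuming a toy merge table where ("u", "g") has the best
# (lowest) rank. `get_pairs` yields the candidate bigrams:
#
#     >>> sorted(get_pairs(tuple("hugs")))
#     [('g', 's'), ('h', 'u'), ('u', 'g')]
#
# After merging ("u", "g"), the word becomes ('h', 'ug', 's') and the loop repeats
# until no ranked pair remains.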
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
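# Illustrative usage (not part of the original file), given the defaults above:
#
#     >>> config = EfficientFormerConfig()  # EfficientFormer-L1-style defaults
#     >>> (config.image_size, config.num_channels, len(config.depths))
#     (224, 3, 4)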
| 65 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def create_and_check_for_token_classification( self : List[Any],config : Union[str, Any],input_ids : int,token_type_ids : Optional[int],input_mask : str,sequence_labels : str,token_labels : Tuple,choice_labels : str ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFConvBertForTokenClassification(config=config )
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_for_question_answering( self : List[Any],config : int,input_ids : List[str],token_type_ids : List[Any],input_mask : Any,sequence_labels : Optional[int],token_labels : List[str],choice_labels : Optional[Any] ):
"""simple docstring"""
model = TFConvBertForQuestionAnswering(config=config )
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def prepare_config_and_inputs_for_common( self : Tuple ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_head_masking = False
test_onnx = False
def setUp( self : List[str] ):
"""simple docstring"""
self.model_tester = TFConvBertModelTester(self )
self.config_tester = ConfigTester(self,config_class=ConvBertConfig,hidden_size=37 )
def test_config( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def test_model( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self : Optional[int] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_multiple_choice( self : Optional[int] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def test_for_question_answering( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def test_for_sequence_classification( self : List[str] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def test_for_token_classification( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def test_saved_model_creation_extended( self : Optional[int] ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config,"use_cache" ):
config.use_cache = True
encoder_seq_length = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
encoder_key_length = getattr(self.model_tester,"key_length",encoder_seq_length )
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict,model_class )
model = model_class(config )
num_out = len(model(class_inputs_dict ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname,saved_model=True )
saved_model_dir = os.path.join(tmpdirname,"saved_model","1" )
model = tf.keras.models.load_model(saved_model_dir )
outputs = model(class_inputs_dict )
if self.is_encoder_decoder:
output_hidden_states = outputs["encoder_hidden_states"]
output_attentions = outputs["encoder_attentions"]
else:
output_hidden_states = outputs["hidden_states"]
output_attentions = outputs["attentions"]
self.assertEqual(len(outputs ),num_out )
expected_num_layers = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(output_hidden_states ),expected_num_layers )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(output_attentions ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def test_model_from_pretrained( self : List[Any] ):
"""simple docstring"""
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(model )
def test_attention_outputs( self : Tuple ):
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
encoder_seq_length = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
decoder_key_length = getattr(self.model_tester,"key_length",decoder_seq_length )
encoder_key_length = getattr(self.model_tester,"key_length",encoder_seq_length )
def check_decoder_attentions_output(outputs : Dict ):
out_len = len(outputs )
self.assertEqual(out_len % 2,0 )
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(outputs : Tuple ):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
config.output_hidden_states = False
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
out_len = len(outputs )
self.assertEqual(config.output_hidden_states,False )
check_encoder_attentions_output(outputs )
if self.is_encoder_decoder:
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
self.assertEqual(config.output_hidden_states,False )
check_decoder_attentions_output(outputs )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
self.assertEqual(config.output_hidden_states,False )
check_encoder_attentions_output(outputs )
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(outputs ) )
self.assertEqual(model.config.output_hidden_states,True )
check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
@slow
def test_inference_no_head( self : Union[str, Any] ):
"""simple docstring"""
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape,expected_shape )
expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],expected_slice,atol=1E-4 )
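# A standalone sanity check mirroring the integration test above (a sketch,
# assuming TensorFlow and the YituTech/conv-bert-base weights are available;
# not part of the unittest suite).
if __name__ == "__main__":
    import tensorflow as tf
    from transformers import TFConvBertModel

    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
    print(output.shape)  # expected: (1, 6, 768)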
| 18 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def test_flax_xlm_roberta_base( self: Optional[int] ) -> None:
model = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
tokenizer = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
text = """The dog is cute and lives in the garden house"""
input_ids = jnp.array([tokenizer.encode(text )] )
expected_output_shape = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
expected_output_values_last_dim = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
output = model(input_ids )["""last_hidden_state"""]
self.assertEqual(output.shape , expected_output_shape )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1E-3 ) )
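# A hedged standalone version of the same check (a sketch, assuming flax,
# transformers, and the xlm-roberta-base checkpoint are available).
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers import AutoTokenizer, FlaxXLMRobertaModel

    model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
    tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
    ids = jnp.array([tokenizer.encode("The dog is cute and lives in the garden house")])
    print(model(ids)["last_hidden_state"].shape)  # expected: (1, 12, 768)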
| 66 | def binary_recursive ( decimal : int ):
"""simple docstring"""
decimal = int(decimal )
if decimal in (0, 1): # Exit cases for the recursion
return str(decimal )
div , mod = divmod(decimal , 2 )
return binary_recursive(div ) + str(mod )
def decimal_to_binary ( number : str ):
"""simple docstring"""
number = str(number ).strip()
if not number:
raise ValueError("No input value was provided" )
negative = "-" if number.startswith("-" ) else ""
number = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
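# Worked example for the converters above: 10 is 0b1010, so
# binary_recursive(10) returns "1010" and the wrapper adds the sign/prefix.
if __name__ == "__main__":
    print(decimal_to_binary("10"))   # 0b1010
    print(decimal_to_binary("-10"))  # -0b1010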
| 18 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger =logging.get_logger(__name__)
Prediction =Dict[str, Any]
Predictions =List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline ( Pipeline ):
def __init__( self : Optional[int] , *args : int , **kwargs : List[Any] ):
"""simple docstring"""
super().__init__(*args , **kwargs )
if self.framework == "tf":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _sanitize_parameters( self : Optional[Any] , **kwargs : str ):
"""simple docstring"""
postprocess_kwargs = {}
if "threshold" in kwargs:
postprocess_kwargs['''threshold'''] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : str , *args : str , **kwargs : Union[str, Any] ):
"""simple docstring"""
return super().__call__(*args , **kwargs )
def preprocess( self : Union[str, Any] , image : int ):
"""simple docstring"""
image = load_image(image )
target_size = torch.IntTensor([[image.height, image.width]] )
inputs = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
inputs = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
inputs['''target_size'''] = target_size
return inputs
def _forward( self : str , model_inputs : Optional[Any] ):
"""simple docstring"""
target_size = model_inputs.pop('''target_size''' )
outputs = self.model(**model_inputs )
model_outputs = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
model_outputs['''bbox'''] = model_inputs['''bbox''']
return model_outputs
def postprocess( self : Optional[Any] , model_outputs : int , threshold : List[str]=0.9 ):
"""simple docstring"""
target_size = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
height , width = target_size[0].tolist()
def unnormalize(bbox : Any ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
scores , classes = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
boxes = [unnormalize(bbox ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
keys = ['''score''', '''label''', '''box''']
annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
raw_annotation = raw_annotations[0]
scores = raw_annotation['''scores''']
labels = raw_annotation['''labels''']
boxes = raw_annotation['''boxes''']
raw_annotation['''scores'''] = scores.tolist()
raw_annotation['''labels'''] = [self.model.config.id2label[label.item()] for label in labels]
raw_annotation['''boxes'''] = [self._get_bounding_box(box ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
keys = ['''score''', '''label''', '''box''']
annotation = [
dict(zip(keys , vals ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = box.int().tolist()
__lowerCamelCase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
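# A minimal usage sketch for the pipeline above (assuming PyTorch, PIL, and
# the facebook/detr-resnet-50 checkpoint are available; the COCO image URL is
# the usual transformers demo image).
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg")
    # Each entry looks like:
    # {"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
    print(predictions[:2])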
| 67 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
_import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
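# A short sketch of what the lazy module above buys (not part of this
# __init__; assumes transformers is installed): importing the package stays
# cheap, and the torch-backed classes are only imported on first access.
if __name__ == "__main__":
    from transformers import ChineseCLIPProcessor  # resolved lazily via _LazyModule

    print(ChineseCLIPProcessor)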
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_longformer_fast"""] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_longformer"""] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_longformer"""] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 68 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys ( s_dict : List[str] ):
"""simple docstring"""
keys = list(s_dict.keys() )
for key in keys:
layer_pattern = R".*/layers_(\d+)"
new_key = key
if re.match(layer_pattern , key ):
new_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , new_key )
encoder_decoder_pattern = R"(encoder|decoder)\/"
if re.match(encoder_decoder_pattern , key ):
groups = re.match(encoder_decoder_pattern , new_key ).groups()
if groups[0] == "encoder":
new_key = re.sub(R"/mlp/" , R"/1/mlp/" , new_key )
new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , new_key )
elif groups[0] == "decoder":
new_key = re.sub(R"/mlp/" , R"/2/mlp/" , new_key )
new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , new_key )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
new_key = new_key.replace(old_key , temp_key )
print(f'{key} -> {new_key}' )
s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
num_experts = s_dict[key].shape[0]
expert_weights = s_dict[key]
for idx in range(num_experts ):
new_key = key.replace("expert/" , f'experts/expert_{idx}/' )
s_dict[new_key] = expert_weights[idx]
print(f'{key} -> {new_key}' )
s_dict.pop(key )
return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config ( gin_file : Dict , num_experts : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(gin_file , "r" ) as f:
raw_gin = f.read()
regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
args = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
args["num_experts"] = num_experts
config = SwitchTransformersConfig(**args )
return config
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path : str , config_name : str , gin_file : str=None , pytorch_dump_path : Optional[Any]="./" , num_experts : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
if gin_file is not None:
config = convert_gin_to_config(gin_file , num_experts )
else:
config = SwitchTransformersConfig.from_pretrained(config_name )
pt_model = SwitchTransformersForConditionalGeneration(config )
flax_params = flax_params["target"]
flax_params = flatten_dict(flax_params , sep="/" )
flax_params = rename_keys(flax_params )
flax_params = unflatten_dict(flax_params , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(pt_model , flax_params )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
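# Example invocation (hypothetical paths and script name; the gin file is
# optional when a config name is given instead):
#
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8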
| 18 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def split_text ( text: str , n: int = 100 , character: str = " " ) -> List[str]:
text = text.split(character )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents ( documents: dict ) -> dict:
titles , texts = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(text ):
titles.append(title if title is not None else '' )
texts.append(passage )
return {"title": titles, "text": texts}
def embed ( documents: dict , ctx_encoder: DPRContextEncoder , ctx_tokenizer: DPRContextEncoderTokenizerFast ) -> dict:
input_ids = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def main ( rag_example_args , processing_args , index_hnsw_args ) -> None:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
dataset = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )
# And compute the embeddings
ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
new_features = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
dataset = dataset.map(
partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )
# And finally save your dataset
passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(passages_path )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=index )
# And save the index
index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(index_path )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments :
csv_path: str = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , )
question: Optional[str] = field(
default=None , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , )
rag_model_name: str = field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , )
dpr_ctx_encoder_model_name: str = field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
" 'facebook/dpr-ctx_encoder-multiset-base'"
)
} , )
output_dir: Optional[str] = field(
default=str(Path(__file__ ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class ProcessingArguments :
num_proc: Optional[int] = field(
default=None , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
batch_size: int = field(
default=1_6 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class IndexHnswArguments :
d: int = field(
default=7_6_8 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
m: int = field(
default=1_2_8 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
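# A quick check of the chunking helper above (toy input, outside the CLI
# flow): seven words split into passages of three words each.
#   split_text("one two three four five six seven", n=3)
#   -> ['one two three', 'four five six', 'seven']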
| 69 | from math import factorial, radians
def maclaurin_sin ( angle_in_degrees : float , accuracy : int = 1_8 , rounded_values_count : int = 1_0 ):
"""simple docstring"""
angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
angle_in_radians = radians(angle_in_degrees )
result = angle_in_radians
a = 3
b = -1
for _ in range(accuracy ):
result += (b * (angle_in_radians**a)) / factorial(a )
b = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
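# Worked example for the approximation above: sin(30 degrees) is exactly 0.5,
# and 18 Taylor terms converge well past 10 decimal places.
if __name__ == "__main__":
    print(maclaurin_sin(30))   # 0.5
    print(maclaurin_sin(-30))  # -0.5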
| 18 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig ( datasets.BuilderConfig ):
features: Optional[datasets.Features] = None
class Pandas ( datasets.ArrowBasedBuilder ):
BUILDER_CONFIG_CLASS = PandasConfig
def _info( self : Optional[Any] ) -> datasets.DatasetInfo:
return datasets.DatasetInfo(features=self.config.features )
def _split_generators( self : List[str] , dl_manager : Dict ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
data_files = dl_manager.download_and_extract(self.config.data_files )
if isinstance(data_files , (str, list, tuple) ):
files = data_files
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
splits = []
for split_name, files in data_files.items():
if isinstance(files , str ):
files = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
files = [dl_manager.iter_files(file ) for file in files]
splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"""files""": files} ) )
return splits
def _cast_table( self : List[Any] , pa_table : pa.Table ) -> pa.Table:
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table , self.config.features.arrow_schema )
return pa_table
def _generate_tables( self : Dict , files : Optional[Any] ):
for i, file in enumerate(itertools.chain.from_iterable(files ) ):
with open(file , """rb""" ) as f:
pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
yield i, self._cast_table(pa_table )
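# A usage sketch for the builder above (a toy pickled DataFrame; the "pandas"
# packaged builder in `datasets` routes through this class):
if __name__ == "__main__":
    import pandas as pd
    from datasets import load_dataset

    pd.DataFrame({"a": [1, 2], "b": ["x", "y"]}).to_pickle("toy.pkl")
    ds = load_dataset("pandas", data_files="toy.pkl", split="train")
    print(ds[0])  # {'a': 1, 'b': 'x'}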
| 70 | from functools import lru_cache
@lru_cache
def factorial ( num : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
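# Worked example: @lru_cache memoizes the recursion, so factorial(6) after
# factorial(5) costs a single cached lookup plus one multiplication.
if __name__ == "__main__":
    print(factorial(5))  # 120
    print(factorial(6))  # 720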
| 18 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check ( pkg ,hint=None ) -> None:
require_version(deps[pkg] ,hint )
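# A hedged sketch of the check performed above: require_version compares an
# installed distribution against a PEP 440 specifier and raises if unmet.
if __name__ == "__main__":
    from transformers.utils.versions import require_version

    require_version("numpy>=1.17", "Try: pip install -U numpy")  # no-op when satisfied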
| 71 | from collections import defaultdict
def dfs ( start : int ):
"""simple docstring"""
ret = 1
visited[start] = True
for v in tree[start]:
if v not in visited:
ret += dfs(v )
if ret % 2 == 0:
cuts.append(start )
return ret
def even_tree ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
n , m = 10, 9
tree = defaultdict(list)
visited : dict[int, bool] = {}
cuts : list[int] = []
count = 0
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
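# For the 10-node sample tree above, dfs(1) records even-sized subtrees rooted
# at nodes 3, 6 and the root itself; the root cannot be cut away, hence the
# final `- 1`, and the script prints 2.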
| 18 | 0 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest ( SchedulerCommonTest ):
scheduler_classes = (CMStochasticIterativeScheduler,)
num_inference_steps = 1_0
def get_scheduler_config( self : str , **kwargs : List[str] ):
"""simple docstring"""
config = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**kwargs )
return config
def test_step_shape( self : List[Any] ):
"""simple docstring"""
num_inference_steps = 1_0
scheduler_config = self.get_scheduler_config()
scheduler = self.scheduler_classes[0](**scheduler_config )
scheduler.set_timesteps(num_inference_steps )
timestep_0 = scheduler.timesteps[0]
timestep_1 = scheduler.timesteps[1]
sample = self.dummy_sample
residual = 0.1 * sample
output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
self.assertEqual(output_0.shape , sample.shape )
self.assertEqual(output_0.shape , output_1.shape )
def test_timesteps( self : List[str] ):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_clip_denoised( self : Optional[Any] ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=clip_denoised )
def test_full_loop_no_noise( self : Union[str, Any] ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_inference_steps = 1
scheduler.set_timesteps(num_inference_steps )
timesteps = scheduler.timesteps
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(timesteps ):
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample , t )
# 2. predict noise residual
model_output = model(scaled_sample , t )
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def test_full_loop_custom_timesteps( self : Union[str, Any] ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [1_0_6, 0]
scheduler.set_timesteps(timesteps=timesteps )
timesteps = scheduler.timesteps
generator = torch.manual_seed(0 )
model = self.dummy_model()
sample = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
scaled_sample = scheduler.scale_model_input(sample , t )
# 2. predict noise residual
model_output = model(scaled_sample , t )
# 3. predict previous sample x_t-1
pred_prev_sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def test_custom_timesteps_increasing_order( self : Dict ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(ValueError , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self : List[Any] ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [3_9, 3_0, 1_2, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large( self : Optional[Any] ):
"""simple docstring"""
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=timesteps )
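# A minimal denoising-loop sketch with the scheduler under test (assumes
# diffusers is installed; the scaled input stands in for a trained
# consistency-model call).
if __name__ == "__main__":
    import torch
    from diffusers import CMStochasticIterativeScheduler

    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(2)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_output = scheduler.scale_model_input(sample, t)  # stand-in for model(scaled, t)
        sample = scheduler.step(model_output, t, sample, generator=torch.manual_seed(0)).prev_sample
    print(sample.shape)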
| 72 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main ( ):
"""simple docstring"""
parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
subparsers = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=subparsers )
env_command_parser(subparsers=subparsers )
launch_command_parser(subparsers=subparsers )
tpu_command_parser(subparsers=subparsers )
test_command_parser(subparsers=subparsers )
# Let's go
args = parser.parse_args()
if not hasattr(args , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(args )
if __name__ == "__main__":
main()
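# Typical invocations of the dispatcher above (each subcommand is registered
# by one of the *_command_parser helpers):
#
#   accelerate config        # interactive configuration
#   accelerate env           # print environment info
#   accelerate test          # sanity-check the current config
#   accelerate launch train.py --arg value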
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure ={
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_blip_2"""] =[
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_2 import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
Blip2Config,
Blip2QFormerConfig,
Blip2VisionConfig,
)
from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_2 import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Blip2ForConditionalGeneration,
Blip2Model,
Blip2PreTrainedModel,
Blip2QFormerModel,
Blip2VisionModel,
)
else:
import sys
sys.modules[__name__] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 73 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = RoFormerTokenizer
def __init__( self : List[str],vocab_file : int=None,tokenizer_file : int=None,do_lower_case : int=True,unk_token : List[Any]="[UNK]",sep_token : Tuple="[SEP]",pad_token : List[Any]="[PAD]",cls_token : Optional[int]="[CLS]",mask_token : Optional[Any]="[MASK]",tokenize_chinese_chars : Optional[int]=True,strip_accents : List[str]=None,**kwargs : List[Any],):
"""simple docstring"""
super().__init__(
vocab_file,tokenizer_file=tokenizer_file,do_lower_case=do_lower_case,unk_token=unk_token,sep_token=sep_token,pad_token=pad_token,cls_token=cls_token,mask_token=mask_token,tokenize_chinese_chars=tokenize_chinese_chars,strip_accents=strip_accents,**kwargs,)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase",do_lower_case ) != do_lower_case
or normalizer_state.get("strip_accents",strip_accents ) != strip_accents
):
normalizer_class = getattr(normalizers,normalizer_state.pop("type" ) )
normalizer_state["lowercase"] = do_lower_case
normalizer_state["strip_accents"] = strip_accents
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
state = self.__dict__.copy()
state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
return state
def __setstate__( self : List[Any],d : Union[str, Any] ):
"""simple docstring"""
self.__dict__ = d
vocab = self.__dict__["_tokenizer"].get_vocab()
self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
def build_inputs_with_special_tokens( self : Union[str, Any],token_ids_0 : List[Any],token_ids_1 : str=None ):
"""simple docstring"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self : str,token_ids_0 : List[int],token_ids_1 : Optional[List[int]] = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self : int,save_directory : str,filename_prefix : Optional[str] = None ):
"""simple docstring"""
files = self._tokenizer.model.save(save_directory,name=filename_prefix )
return tuple(files )
def save_pretrained( self : int,save_directory : Optional[int],legacy_format : List[Any]=None,filename_prefix : Tuple=None,push_to_hub : str=False,**kwargs : List[Any],):
"""simple docstring"""
self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
return super().save_pretrained(save_directory,legacy_format,filename_prefix,push_to_hub,**kwargs )
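# A short usage sketch for the fast tokenizer above (assuming the
# junnyu/roformer_chinese_base checkpoint and the jieba package used by the
# custom pre-tokenizer are available).
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tokenizer.tokenize("今天天气非常好。"))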
| 18 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = '''examples/'''
REPLACE_PATTERNS = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = '''README.md'''
def update_version_in_file ( fname : str , version : str , pattern : str ):
with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
code = f.read()
re_pattern , replace = REPLACE_PATTERNS[pattern]
replace = replace.replace('VERSION' , version )
code = re_pattern.sub(replace , code )
with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(code )
def update_version_in_examples ( version : Tuple ):
for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update ( version : List[str] , patch : List[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname , version , pattern )
if not patch:
update_version_in_examples(version )
def clean_main_ref_in_model_list ( ):
_start_prompt = '🤗 Transformers currently provides the following architectures'
_end_prompt = '1. Want to contribute a new model?'
with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lines = f.readlines()
# Find the start of the list.
start_index = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
index = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lines[index] = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines )
def get_version ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
code = f.read()
default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
return packaging.version.parse(default_version )
def pre_release_work ( patch : Dict=False ):
default_version = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
default_version = default_version.base_version
elif patch:
default_version = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
default_version = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
version = input(F'Which version are you releasing? [{default_version}]' )
if len(version ) == 0:
version = default_version
print(F'Updating version to {version}.' )
global_version_update(version , patch=patch )
def post_release_work ( ):
current_version = get_version()
dev_version = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
current_version = current_version.base_version
# Check with the user we got that right.
version = input(F'Which version are we developing now? [{dev_version}]' )
if len(version ) == 0:
version = dev_version
print(F'Updating version to {version}.' )
global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
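# Typical invocations (file name illustrative; the flags match the argparse
# below):
#   python release.py                 # prepare a minor release
#   python release.py --patch         # prepare a patch release
#   python release.py --post_release  # bump back to a .dev0 version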
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 74 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
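
# A hedged usage sketch (not part of the test file above): the MNLI checkpoint
# exercised by the integration test can also be driven through the pipeline API.
# The checkpoint name comes from the test; the example sentence pair is illustrative.
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="squeezebert/squeezebert-mnli")
#     print(classifier("A soccer game with multiple males playing. [SEP] Some men are playing a sport."))
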
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
            # fmt: on
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            # fmt: off
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
            # fmt: on
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_,  # the expected-encoding dict assigned above
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10_000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
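
# A hedged usage sketch (not part of the tests above): encoding text for a given
# target language with the multilingual checkpoint the test class exercises.
#
#     from transformers import Speech2TextTokenizer
#
#     tok = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#     tok.tgt_lang = "fr"
#     ids = tok("C'est trop cool").input_ids  # the first id is the French language code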


import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
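
# A hedged usage sketch: the public entry point equivalent to the TextDatasetReader
# calls tested above is ``load_dataset("text", ...)``; the file name is illustrative.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("text", data_files={"train": "my_corpus.txt"})["train"]
#     print(ds.column_names)  # ["text"] -- one example per line of the input file
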
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,  # parameter name reconstructed from the upstream tester; unused below
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True  # flag name reconstructed; the original identifier was lost in extraction

    # special case for the ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))


import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # flag name reconstructed; the original identifier was lost in extraction

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        # The `None`/`False` values below are reconstructed from the upstream test file;
        # the original literals were lost in extraction.
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
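
# A hedged usage sketch mirroring the slow tests above (needs a CUDA GPU and the
# Hub checkpoints; the prompt and the ``low_res_image`` input are illustrative):
#
#     import torch
#     from diffusers import StableDiffusionLatentUpscalePipeline
#
#     upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#         "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     image = upscaler(prompt="a photo of a castle", image=low_res_image, num_inference_steps=20).images[0]
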
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
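
# Hypothetical invocation of the conversion script above (the paths are illustrative):
#
#     python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted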


from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 18 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
UpperCAmelCase = np.zeros((n + 1,) )
UpperCAmelCase = ya
UpperCAmelCase = xa
for k in range(lowercase_ ):
UpperCAmelCase = y[k] + step_size * ode_func(lowercase_ , y[k] )
UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(lowercase_ , y[k] ) + ode_func(x + step_size , lowercase_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
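# Hedged usage sketch (not part of the original file): integrate y' = y with
# y(0) = 1 over [0, 1]; the endpoint should approach e ≈ 2.71828 as the step
# size shrinks, since the method is second-order accurate.
#
#     y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
#     print(y[-1])  # ≈ 2.7182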
| 78 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
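# Hedged usage sketch (illustrative, not from the original file): round-trip a
# metadata block the way the CLI above does.
#
#     md = DatasetMetadata.from_yaml_string("language:\n- en\n")
#     print(md.to_yaml_string())        # "language:\n- en\n"
#     md.to_readme(Path("README.md"))   # rewrites the "---"-delimited header only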
| 18 | 0 |
def is_pentagonal(n: int) -> bool:
    '''A positive integer n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is whole.'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    '''Project Euler 44: find the smallest difference D = P_k - P_j such that
    P_j + P_k and P_k - P_j are both pentagonal.'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F"""{solution() = }""")
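# Hedged sanity check (illustrative, not from the original file):
# P_4 = 4 * (3*4 - 1) / 2 = 22, and is_pentagonal(22) holds because
# (1 + sqrt(1 + 24*22)) / 6 = (1 + 23) / 6 = 4 exactly.
# is_pentagonal(23) fails: 1 + 24*23 = 553, whose square root is irrational.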
| 79 |
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
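# Hedged worked example (illustrative values, not from the original file):
# with L = 10 mH and C = 100 nF,
#
#     resonant_frequency(0.01, 1e-7)
#
# gives f = 1 / (2 * pi * sqrt(1e-9)) ≈ 5032.9 Hz.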
| 18 | 0 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 80 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using insertion sort with a binary search to find
    each insertion point."""
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift everything in [low, i) one slot to the right, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
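# Hedged usage sketch (not from the original file):
#
#     binary_insertion_sort([5, 2, 4, 1])  # -> [1, 2, 4, 5]
#
# The binary search cuts comparisons to O(n log n), but the element shifts keep
# the overall worst-case running time at O(n^2), like plain insertion sort.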
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : int = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
lowerCamelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
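# Hedged note (not from the original file): query_range uses inclusive indices,
# so with fn=operator.add over [2, 5, 5, 3, 4] (after update(1, 5)) the call
# query_range(1, 3) sums elements 1..3: 5 + 5 + 3 = 13, matching the comment
# above. Building the tree costs O(n); each update or query touches O(log n)
# nodes.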
| 18 | 0 |
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}


class EncodecConfig(PretrainedConfig):
    model_type = '''encodec'''

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                F'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
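# Hedged worked example (illustrative): with the 24 kHz defaults above, the hop
# length is prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320)
# = 75 frames per second, and num_quantizers = 1000 * 24.0 // (75 * 10) = 32.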
| 82 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}')
    print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}')
    print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}')
    print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}')
    print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}')

    print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}')
    print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}')
    print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}')
    print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}')
    print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}')


if __name__ == "__main__":
    main()
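# Hedged companion sketch (not from the original file): the gcd gives the least
# common multiple for free, since gcd(a, b) * lcm(a, b) == a * b for a, b > 0.
#
#     def lcm(a: int, b: int) -> int:
#         return a * b // euclidean_gcd(a, b)
#
#     lcm(4, 6)  # -> 12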
| 18 | 0 |
from __future__ import annotations


class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    '''Create a singly linked list from a list of values and return its head.'''
    if not elements_list:
        raise Exception('The Elements List is empty')

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    '''Print the list back to front by recursing to the tail first.'''
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
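# Hedged note (not from the original file): print_reverse recurses once per
# node, so very long lists can exceed Python's default recursion limit
# (roughly 1000 frames). An iterative alternative collects the values first:
#
#     def print_reverse_iterative(head_node):
#         values = []
#         while head_node:
#             values.append(head_node.data)
#             head_node = head_node.next
#         for value in reversed(values):
#             print(value)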
| 83 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        '''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMAEForPreTraining''',
        '''ViTMAELayer''',
        '''ViTMAEModel''',
        '''ViTMAEPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        '''TFViTMAEForPreTraining''',
        '''TFViTMAEModel''',
        '''TFViTMAEPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class _SCREAMING_SNAKE_CASE ( Generic[T] ):
UpperCAmelCase_ :deque[T] # Cache store of keys
UpperCAmelCase_ :set[T] # References of the keys in cache
UpperCAmelCase_ :int = 10 # Maximum capacity of cache
def __init__( self , __A ) -> None:
lowerCAmelCase_ :Tuple = deque()
lowerCAmelCase_ :Any = set()
if not n:
lowerCAmelCase_ :str = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
lowerCAmelCase_ :Any = n
def __lowerCAmelCase ( self , __A ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase_ :int = self.dq_store.pop()
self.key_reference.remove(__A )
else:
self.dq_store.remove(__A )
self.dq_store.appendleft(__A )
self.key_reference.add(__A )
def __lowerCAmelCase ( self ) -> None:
for k in self.dq_store:
print(__A )
def __repr__( self ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
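# Hedged design note (not from the original file): deque.remove is O(n), so
# refreshing a key that is already cached costs linear time. An OrderedDict
# gives O(1) refreshes with the same eviction order:
#
#     from collections import OrderedDict
#
#     class LRUCacheOD:
#         def __init__(self, n: int) -> None:
#             self.store: OrderedDict = OrderedDict()
#             self.capacity = n
#
#         def refer(self, x) -> None:
#             if x in self.store:
#                 self.store.move_to_end(x)       # mark as most recently used
#             elif len(self.store) == self.capacity:
#                 self.store.popitem(last=False)  # evict least recently used
#             self.store[x] = None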
| 84 |
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''',
        choices=['''rag_sequence''', '''rag_token'''],
        required=True,
        type=str,
        help='''RAG model type: rag_sequence, rag_token''',
    )
    parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
    parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
    parser.add_argument(
        '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
    )
    parser.add_argument(
        '''--generator_tokenizer_name_or_path''',
        type=str,
        help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
    )
    parser.add_argument(
        '''--question_encoder_tokenizer_name_or_path''',
        type=str,
        help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
    )
    parser.add_argument(
        '''--config_name_or_path''',
        type=str,
        help=(
            '''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
            ''' ``model_type``'''
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
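# Hedged CLI sketch (illustrative script name and checkpoints, not from the
# original file): consolidate a RAG model from a DPR question encoder and a
# BART generator:
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-consolidated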
| 18 | 0 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 85 |
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 18 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 86 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace and control characters that would break BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
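# Hedged walk-through (illustrative, not from the original file): get_pairs
# returns the adjacent symbol pairs that the BPE loop repeatedly tries to
# merge, always picking the pair with the lowest merge rank first:
#
#     get_pairs(("h", "e", "l", "l", "o"))
#     # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}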
| 18 | 0 |
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(F'''{metric_key_prefix}_'''):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)

            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(F'''{metric_key_prefix}_'''):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
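# Hedged usage sketch (illustrative; the helper names `post_processing_function`
# and `compute_metrics` are placeholders, not part of this excerpt):
#
#     trainer = QuestionAnsweringSeq2SeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,              # raw examples for post-processing
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=30, num_beams=4)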
| 87 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size],self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
            choice_labels = ids_tensor([self.batch_size],self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=True,)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids,1 ),(1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask,1 ),(1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids,1 ),(1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self,config,input_ids,token_type_ids,input_mask,sequence_labels,token_labels,choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self,config_class=ConvBertConfig,hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_saved_model_creation_extended( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config,"use_cache" ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester,"key_length",encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict,model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname,saved_model=True )
                saved_model_dir = os.path.join(tmpdirname,"saved_model","1" )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs ),num_out )
                expected_num_layers = getattr(
                    self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ),expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
                self.assertEqual(len(output_attentions ),self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester,"key_length",decoder_seq_length )
        encoder_key_length = getattr(self.model_tester,"key_length",encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2,0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ),self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ),self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states,False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
                self.assertEqual(config.output_hidden_states,False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            self.assertEqual(config.output_hidden_states,False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict,model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(outputs ) )
            self.assertEqual(model.config.output_hidden_states,True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape,expected_shape )
        expected_slice = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
        tf.debugging.assert_near(output[:, :3, :3],expected_slice,atol=1E-4 )
| 18 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | def binary_recursive ( decimal : int ):
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number : str ):
    """simple docstring"""
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
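    # Minimal usage checks for the converter above (a sketch; assumes the
    # binary_recursive/main names as fixed in this snippet):
    assert main("12") == "0b1100"
    assert main("-12") == "-0b1100"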
| 18 | 0 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
    def test_small_model_tf( self ):
        pass
@slow
@require_torch
    def test_small_model_pt( self ):
        image_segmenter = pipeline('mask-generation' ,model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' ,points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] ,)
# fmt: on
@require_torch
@slow
    def test_threshold( self ):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' ,model=model_id )
        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' ,pred_iou_thresh=1 ,points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output ,decimals=4 ) ,[
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] ,)
| 89 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
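# Note: the _LazyModule indirection above keeps importing this package cheap;
# the torch/vision-backed classes are only materialised on first attribute access.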
| 18 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ) -> List[str]:
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        '''simple docstring'''
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        '''simple docstring'''
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        '''simple docstring'''
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        '''simple docstring'''
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        '''simple docstring'''
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Tuple:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> List[str]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileBertModel,
'''fill-mask''': MobileBertForMaskedLM,
'''question-answering''': MobileBertForQuestionAnswering,
'''text-classification''': MobileBertForSequenceClassification,
'''token-classification''': MobileBertForTokenClassification,
'''zero-shot''': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> Dict:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ) -> Optional[Any]:
        '''simple docstring'''
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ) -> str:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ) -> List[Any]:
    """simple docstring"""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ) -> Union[str, Any]:
        '''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
[
[
[-2.4_73_65_26e07, 8.2_69_16_56e04, 1.6_52_18_38e05],
[-5.7_54_17_04e-01, 3.9_05_60_22e00, 4.4_01_15_07e00],
[2.6_04_73_59e00, 1.5_67_76_52e00, -1.7_32_41_88e-01],
]
            ] , device=torch_device , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 90 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys ( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        layer_pattern = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_pattern , key ):
            new_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , new_key )
        encoder_decoder_pattern = R"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern , key ):
            groups = re.match(encoder_decoder_pattern , key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/" , R"/1/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/" , R"/2/mlp/" , new_key )
                new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                expert_key = key.replace("expert/" , f'experts/expert_{idx}/' )
                s_dict[expert_key] = expert_weights[idx]
                print(f'{key} -> {expert_key}' )
            s_dict.pop(key )
    return s_dict
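# Example of the renaming above on a hypothetical T5X key (illustration only):
#   "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"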
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config ( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , "r" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING["dense.MlpBlock.activations"]] = str(activation[1] )
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params , sep="/" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="/" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
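# Example invocation (the script filename and all paths are placeholders):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8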
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    '''simple docstring'''
    def __init__( self , degree : int , coefficients : MutableSequence[float]):
        '''simple docstring'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                '''The number of coefficients should be equal to the degree + 1.''')
        self.coefficients : list[float] = list(coefficients)
        self.degree = degree
    def __add__( self , polynomial_a : Polynomial):
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients)
    def __sub__( self , polynomial_a : Polynomial):
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0 , [-1])
    def __neg__( self):
        '''simple docstring'''
        return Polynomial(self.degree , [-c for c in self.coefficients])
    def __mul__( self , polynomial_a : Polynomial):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients)
    def evaluate( self , substitution : int | float):
        '''simple docstring'''
        result : int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self):
        '''simple docstring'''
        polynomial = ''''''
        for i in range(self.degree , -1 , -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__( self):
        '''simple docstring'''
        return self.__str__()
    def derivative( self):
        '''simple docstring'''
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients)
    def integral( self , constant : int | float = 0):
        '''simple docstring'''
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients)
    def __eq__( self , polynomial_a : object):
        '''simple docstring'''
        if not isinstance(polynomial_a , Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a : object):
        '''simple docstring'''
        return not self.__eq__(polynomial_a)
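# Illustrative usage of the Polynomial class above (a sketch; coefficients[i]
# holds the coefficient of x^i):
if __name__ == "__main__":
    p = Polynomial(2 , [0, 0, 1])  # x^2
    q = Polynomial(1 , [2, 1])  # x + 2
    print(p + q)  # 1x^2 + 1x + 2
    print((p * q).evaluate(2))  # x^3 + 2x^2 at x = 2 -> 16
    print(p.derivative())  # 2x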
| 91 | from math import factorial, radians
def sin ( angle_in_degrees : float , accuracy : int = 1_8 , rounded_values_count : int = 1_0 ):
    """simple docstring"""
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
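    # Sanity checks for the Maclaurin approximation above (standard exact
    # values; assumes the fixed name `sin`):
    assert sin(90.0) == 1.0
    assert sin(30.0) == 0.5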
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class MgpstrConfig ( PretrainedConfig ):
    model_type = """mgp-str"""
    def __init__( self , image_size=[3_2, 1_2_8] , patch_size=4 , num_channels=3 , max_token_length=2_7 , num_character_labels=3_8 , num_bpe_labels=5_0_2_5_7 , num_wordpiece_labels=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1E-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_aa_attentions=False , initializer_range=0.02 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 92 | from functools import lru_cache
@lru_cache
def factorial ( num : int ):
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
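    # Sanity checks for the memoised factorial above:
    assert factorial(5) == 120
    assert factorial(10) == 3628800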
| 18 | 0 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowercase : Any = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_lowercase : List[Any] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_lowercase : Optional[Any] = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy ( preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1 ( preds , labels , f1_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc ( ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = F'''{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}'''
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average='''macro''' )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue ( datasets.Metric ):
    def _info ( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types ( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute( self , predictions , references ):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "cb":
            return acc_and_fa(predictions , references , fa_avg='''macro''' )
        elif self.config_name == "record":
            dataset = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(dataset , predictions )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions , references )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
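# Hedged usage sketch: how a metric script like the one above is typically
# consumed through ``datasets`` (the "super_glue" registry name is an
# assumption about how this script is published):
def _demo_super_glue_boolq():
    import datasets as ds
    metric = ds.load_metric("super_glue", "boolq")
    return metric.compute(predictions=[0, 1, 1], references=[0, 1, 0])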
from collections import defaultdict
def dfs( start : int ):
    """Return the size of the subtree rooted at ``start``; record nodes whose subtree size is even."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree( ):
    """Run the DFS from node 1, the root of the tree."""
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
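# Worked note (illustrative, not part of the original solution): in the tree
# above the subtrees rooted at 3 ({3, 4}) and at 6 ({6, 8, 9, 10}) have even
# sizes, so cutting the edges (1, 3) and (1, 6) leaves three even components
# ({1, 2, 5, 7}, {3, 4}, {6, 8, 9, 10}) and the program prints 2.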
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed( self ):
        return 12
    @property
    def num_embeds_ada_norm( self ):
        return 12
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def dummy_vqvae( self ):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def dummy_tokenizer( self ):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    @property
    def dummy_transformer( self ):
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            '''attention_bias''': True,
            '''cross_attention_dim''': 32,
            '''attention_head_dim''': height * width,
            '''num_attention_heads''': 1,
            '''num_vector_embeds''': self.num_embed,
            '''num_embeds_ada_norm''': self.num_embeds_ada_norm,
            '''norm_num_groups''': 32,
            '''sample_size''': width,
            '''activation_fn''': '''geglu-approximate''',
        }
        model = Transformer2DModel(**model_kwargs )
        return model
    def test_vq_diffusion( self ):
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='''np''' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling( self ):
        device = '''cpu'''
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''teddy bear playing in the pool'''
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''np''' )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type='''np''' , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' )
        pipeline = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=generator , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
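# Hedged sketch of running the released checkpoint outside the test harness;
# the checkpoint id is taken from the integration test above, the rest is a
# plain diffusers inference loop and assumes a CUDA device is available.
def _demo_vq_diffusion_inference(prompt="teddy bear playing in the pool"):
    pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    pipeline = pipeline.to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    return pipeline(prompt, generator=generator, output_type="np").images[0]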
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main( ):
    """Entry point for the ``accelerate`` command line interface."""
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
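# The ``hasattr(args, "func")`` check in main() relies on every subcommand
# parser registering a callback via ``set_defaults(func=...)``. A minimal,
# hypothetical sketch of that convention (the "demo" command does not exist
# in accelerate):
def _demo_command_parser(subparsers=None):
    def _run(args):
        print("running the hypothetical demo command")
    parser = subparsers.add_parser("demo", help="hypothetical demo command")
    parser.set_defaults(func=_run)
    return parser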
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config( PretrainedConfig ):
    model_type = """speech_to_text_2"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=1_0_0_0_0 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=4 , decoder_layerdrop=0.0 , use_cache=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_target_positions=1_0_2_4 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
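# Hedged usage sketch: ``attribute_map`` above lets generic code read
# ``hidden_size`` and ``num_attention_heads`` even though this config stores
# ``d_model`` and ``decoder_attention_heads``.
def _demo_speech_to_text_2_config():
    config = Speech2Text2Config(d_model=128, decoder_layers=2)
    assert config.hidden_size == 128  # resolved through attribute_map
    assert config.num_attention_heads == config.decoder_attention_heads
    return config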
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        """simple docstring"""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
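# Why the pickling hooks above swap pre-tokenizers: ``PreTokenizer.custom``
# wraps a Python object that the Rust backend cannot serialize, so pickling
# temporarily falls back to ``BertPreTokenizer`` and ``__setstate__``
# re-attaches the jieba pre-tokenizer. A hedged round-trip sketch (the vocab
# file path is hypothetical):
def _demo_roformer_tokenizer_pickle(vocab_file="vocab.txt"):
    import pickle
    tokenizer = RoFormerTokenizerFast(vocab_file=vocab_file)
    restored = pickle.loads(pickle.dumps(tokenizer))
    return restored.tokenize("今天天气非常好。")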
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
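# ``get_duration`` comes from a local ``utils`` helper that is not part of
# this file. A plausible reimplementation under a different name, so it cannot
# shadow the real import (an assumption about its behaviour, not the project's
# actual code):
import functools
import time
def _example_get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        starttime = time.time()
        func(*args, **kwargs)
        return time.time() - starttime
    return wrapper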
@get_duration
def read( dataset , length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch( dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted( dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch( dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating( ):
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
    functions_shuffled = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
        print('first set of iterations' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
        for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs ) )
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
    benchmark_iterating()
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        """simple docstring"""
        return SqueezeBertConfig(
            embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
    def create_and_check_squeezebert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_squeezebert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_squeezebert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = SqueezeBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_squeezebert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_squeezebert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_squeezebert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': SqueezeBertModel,
            'fill-mask': SqueezeBertForMaskedLM,
            'question-answering': SqueezeBertForQuestionAnswering,
            'text-classification': SqueezeBertForSequenceClassification,
            'token-classification': SqueezeBertForTokenClassification,
            'zero-shot': SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_squeezebert_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_classification_head( self ):
        """simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
        input_ids = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]] )
        self.assertTrue(torch.allclose(output , expected_tensor , atol=1E-4 ) )
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number( number: int ) -> int:
    '''Sum of the squares of the digits of ``number``.'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain( number: int ) -> bool:
    '''Return True if the chain of ``number`` ends at 1, False if it ends at 89.'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number: int = 10000000 ) -> int:
    '''Count the starting numbers below ``number`` whose chains arrive at 89.'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
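# Worked mini-example (illustrative): 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16
# -> 37 -> 58 -> 89, so chain(85) is False (the 89 cycle), while
# 44 -> 32 -> 13 -> 10 -> 1 makes chain(44) True.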
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(F"""{solution() = }""")
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
    """Shared assertions for a dataset read from a text file."""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_text_keep_in_memory( keep_in_memory , text_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_dataset_from_text_features( features , text_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader(text_path , features=features , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split( split , text_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path , cache_dir=cache_dir , split=split ).read()
    _check_text_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type( path_type , text_path , tmp_path ):
    """simple docstring"""
    if issubclass(path_type , str ):
        path = text_path
    elif issubclass(path_type , list ):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_dataset(dataset , expected_features )
def _check_text_datasetdict( dataset_dict , expected_features , splits=("train",) ):
    """Shared assertions for a DatasetDict read from text files."""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_text_keep_in_memory( keep_in_memory , text_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ] , )
def test_datasetdict_from_text_features( features , text_path , tmp_path ):
    """simple docstring"""
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path} , features=features , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split( split , text_path , tmp_path ):
    """simple docstring"""
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path , cache_dir=cache_dir ).read()
    _check_text_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
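# These tests rely on a ``text_path`` fixture provided elsewhere in the
# datasets test suite. A hedged sketch of an equivalent fixture (the file name
# and its four lines are assumptions, chosen to satisfy ``num_rows == 4``):
@pytest.fixture
def _example_text_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.txt"
    path.write_text("\n".join(["0", "1", "2", "3"]) + "\n")
    return str(path)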
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation( base_url: str , params: dict ) -> str:
    '''Return the "Cited by" anchor text for a Google Scholar lookup.'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , 'html.parser' )
    div = soup.find('div' , attrs={'class': 'gs_ri'} )
    anchors = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        'title': (
            'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
            'capacitance, volumetric capacitance, and energy density'
        ),
        'journal': 'Chem. Mater.',
        'volume': 30,
        'pages': '3979-3990',
        'year': 2_018,
        'hl': 'en',
    }
    print(get_citation('https://scholar.google.com/scholar_lookup' , params=params ))
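# Offline sketch of the same selection logic, handy for testing without
# hitting Google Scholar; the HTML snippet is fabricated for illustration:
def _demo_parse_citation_count():
    html = (
        '<div class="gs_ri"><div class="gs_fl">'
        '<a>Save</a><a>Cite</a><a>Cited by 123</a></div></div>'
    )
    soup = BeautifulSoup(html, 'html.parser')
    anchors = soup.find('div', attrs={'class': 'gs_fl'}).find_all('a')
    return anchors[2].get_text()  # -> "Cited by 123"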
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape( tensor_list ):
    """Return True if every tensor in ``tensor_list`` has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
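# Tiny illustrative check of check_same_shape above (not part of the original
# test file):
def _demo_check_same_shape():
    assert check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)])
    assert not check_same_shape([torch.zeros(1, 3), torch.ones(2, 3)])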
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True
    @property
    def dummy_image( self ):
        """simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            act_fn="gelu" , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift="scale_shift" , time_embedding_type="fourier" , timestep_post_act="gelu" , up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D") , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type="sample" )
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="quick_gelu" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_latent_upscaler( self ):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
    def test_cpu_offload_forward_pass( self ):
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
    def test_dict_tuple_outputs_equivalent( self ):
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
    def test_pt_np_pil_outputs_equivalent( self ):
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
    def test_save_load_local( self ):
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3E-3 )
    def test_save_load_optional_components( self ):
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3E-3 )
    def test_karras_schedulers_shape( self ):
        """simple docstring"""
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name )
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config )
            output = pipe(**inputs )[0]
            outputs.append(output )
        assert check_same_shape(outputs )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_two_steps( self ):
        """simple docstring"""
        generator = torch.manual_seed(33 )
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" , torch_dtype=torch.float16 )
        pipe.to("cuda" )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt , generator=generator , output_type="latent" ).images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
        assert np.abs((expected_image - image).mean() ) < 5E-2
    def test_latent_upscaler_from_image( self ):
        """simple docstring"""
        generator = torch.manual_seed(33 )
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler" , torch_dtype=torch.float16 )
        upscaler.to("cuda" )
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
        image = upscaler(
            prompt=prompt , image=init_image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type="np" , ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
        assert np.abs((expected_image - image).max() ) < 5E-2
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 2_5_0_0_0_4
RO_CODE = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self ) -> None:
        '''simple docstring'''
        token = '<s>'
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self ) -> None:
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , '<mask>')
        self.assertEqual(len(vocab_keys) , 1054)
    def test_vocab_size( self ) -> None:
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1054)
    def test_full_tokenizer( self ) -> None:
        '''simple docstring'''
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
    def test_tokenizer_integration( self ) -> None:
'''simple docstring'''
a__ : List[str] = {'input_ids': [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a__ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
    def test_save_pretrained( self ) -> None:
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files , tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2 , legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp , key))
                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
"""simple docstring"""
    checkpoint_name : str = '''facebook/mbart-large-50-one-to-many-mmt'''
    src_text : Union[str, Any] = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    tgt_text : Dict = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    expected_src_tokens : List[Any] = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
    def setUpClass( cls) -> Any:
        '''simple docstring'''
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO')
        cls.pad_token_id: int = 1
        return cls
    def check_language_codes( self) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 25_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 25_0004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 25_0020)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 25_0038)
    def test_tokenizer_batch_encode_plus( self) -> List[str]:
        '''simple docstring'''
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , ids)
    def test_tokenizer_decode_ignores_language_codes( self) -> Dict:
        '''simple docstring'''
        self.assertIn(RO_CODE , self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def test_tokenizer_truncation( self) -> int:
        '''simple docstring'''
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0] , str)
        desired_max_length = 10
        ids = self.tokenizer(src_text , max_length=desired_max_length , truncation=True).input_ids[0]
        self.assertEqual(ids[0] , EN_CODE)
        self.assertEqual(ids[-1] , 2)
        self.assertEqual(len(ids) , desired_max_length)
    def test_mask_token( self) -> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']) , [25_0053, 25_0001])
    def test_special_tokens_unaffected_by_save_load( self) -> Dict:
'''simple docstring'''
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , original_special_tokens)
@require_torch
    def test_batch_fairseq_parity( self) -> List[str]:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=True , return_tensors='pt')
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_tokenizer_prepare_batch( self) -> List[Any]:
'''simple docstring'''
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
        batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
        self.assertIsInstance(batch , BatchEncoding)
        self.assertEqual((2, 14) , batch.input_ids.shape)
        self.assertEqual((2, 14) , batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result)
self.assertEqual(2 , batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length( self) -> str:
'''simple docstring'''
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors='pt')
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors='pt')
        labels = targets['input_ids']
        batch['decoder_input_ids'] = shift_tokens_right(labels , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
    def test_tokenizer_translation( self) -> int:
'''simple docstring'''
        inputs = self.tokenizer._build_translation_inputs(
            'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR')
        self.assertEqual(
            nested_simplify(inputs) , {
# en_XX, A, test, EOS
'input_ids': [[25_0004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_0001,
} , )
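# The fairseq-parity assertions above hinge on shift_tokens_right wrapping the last
# non-pad token (eos / language id) around to position 0. A minimal sketch for
# illustration only -- the real helper lives in transformers' mBART modeling code,
# and this assumes its wrap-around variant rather than a plain "prepend start token" one:
import torch
def shift_tokens_right_sketch(input_ids: "torch.Tensor", pad_token_id: int) -> "torch.Tensor":
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in every row (the eos of an mBART-50 target)
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens
# e.g. labels [RO_CODE, t1, ..., eos] become decoder inputs [eos, RO_CODE, t1, ...],
# which is exactly what the [2, RO_CODE] prefix assertion checks.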
| 99 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig ( PretrainedConfig ):
    model_type = 'perceiver'
    def __init__( self,num_latents=256,d_latents=1280,d_model=768,num_blocks=1,num_self_attends_per_block=26,num_self_attention_heads=8,num_cross_attention_heads=8,qk_channels=None,v_channels=None,cross_attention_shape_for_attention="kv",self_attention_widening_factor=1,cross_attention_widening_factor=1,hidden_act="gelu",attention_probs_dropout_prob=0.1,initializer_range=0.02,layer_norm_eps=1E-12,use_query_residual=True,vocab_size=262,max_position_embeddings=2048,image_size=56,train_size=[368, 496],num_frames=16,audio_samples_per_frame=1920,samples_per_patch=16,output_shape=[1, 16, 224, 224],**kwargs,):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ):
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1E-4
    def generate_dummy_inputs( self,preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],batch_size : int = -1,seq_length : int = -1,num_choices : int = -1,is_pair : bool = False,framework : Optional[TensorType] = None,num_channels : int = 3,image_width : int = 40,image_height : int = 40,):
        """simple docstring"""
        if isinstance(preprocessor,PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input,return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor,FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size,fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size,num_channels,image_height,image_width )
            inputs = dict(preprocessor(images=dummy_input,return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
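# A minimal illustration of the lazy-import trick used above (a simplified sketch of
# what transformers' _LazyModule does, not its exact implementation): the entry in
# sys.modules is swapped for a proxy that imports a submodule on first attribute access.
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
    def __getattr__(self, attr):
        # e.g. FNetConfig triggers importing .configuration_fnet only when first touched
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)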
| 100 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self,node ):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key,list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping( self,node,deep=False ):
        """simple docstring"""
        mapping = super().construct_mapping(node,deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content : str ) -> Tuple[Optional[str], str]:
    """simple docstring"""
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
    def from_readme( cls,path : Path ):
        """simple docstring"""
        with open(path,encoding="utf-8" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self,path : Path ):
        """simple docstring"""
        if path.exists():
            with open(path,encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path,"w",encoding="utf-8" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self,readme_content : Optional[str] = None ):
        """simple docstring"""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string( cls,string : str ):
        """simple docstring"""
        metadata_dict = yaml.load(string,Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ):
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },sort_keys=False,allow_unicode=True,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
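    # Illustration of the duplicate-key guard above (the YAML content here is hypothetical):
    try:
        yaml.load('''language: en\nlanguage: fr\n''', Loader=_NoDuplicateSafeLoader)
    except TypeError as err:
        print(err)  # Got duplicate yaml keys: ['language'] -- plain SafeLoader would silently keep the last value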
| 18 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaInpaintPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self):
        return 3_2
    @property
    def time_input_dim( self):
        return 3_2
    @property
    def block_out_channels_a( self):
        return self.time_input_dim
    @property
    def time_embed_dim( self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self):
        return 1_0_0
@property
    def dummy_unet( self):
torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components( self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='''linear''' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=False ,set_alpha_to_one=False ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=False ,)
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            device)
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0 ,2 ,3 ,1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((2_5_6, 2_5_6))
        # create mask
        mask = np.ones((6_4, 6_4) ,dtype=np.float32)
        mask[:3_2, :3_2] = 0
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 6_4,
            '''width''': 6_4,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_inpaint( self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device) ,return_dict=False ,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}')
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
    def test_inference_batch_single_identical( self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        mask = np.ones((7_6_8, 7_6_8) ,dtype=np.float32)
        mask[:2_5_0, 2_5_0:-2_5_0] = 0
        prompt = '''a hat'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' ,torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_emb , zero_image_emb = pipe_prior(
            prompt ,generator=generator ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
        output = pipeline(
            image=init_image ,mask_image=mask ,image_embeds=image_emb ,negative_image_embeds=zero_image_emb ,generator=generator ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image)
| 101 | from __future__ import annotations
from math import pi, sqrt
def resonant_frequency ( inductance : float , capacitance : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
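    # Worked example (values are illustrative): a 10 mH inductor with a 100 nF capacitor
    # resonates near 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)):
    # >>> resonant_frequency(inductance=10e-3, capacitance=100e-9)
    # ('Resonant frequency', 5032.92...)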
| 18 | 0 |
"""simple docstring"""
import os
from math import log10
def solution ( data_file : str = "base_exp.txt" ) ->int:
    """simple docstring"""
    largest : float = 0
    result : int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
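    # Why the logarithm: comparing a**x directly would materialize astronomically large
    # integers, while log10 is monotonic, so x * log10(a) preserves the ordering.
    # Illustration: 3**7 (= 2187) beats 2**11 (= 2048) because
    # 7 * log10(3) ~ 3.340 > 11 * log10(2) ~ 3.311.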
def binary_insertion_sort ( collection : list ):
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
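    # Quick sanity check (illustrative): the binary search only cuts comparisons; the
    # element shifting is still linear, so the sort stays O(n^2) but agrees with sorted():
    # >>> binary_insertion_sort([5, 2, 4, 6, 1, 3])
    # [1, 2, 3, 4, 5, 6]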
| 18 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Tuple = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig ( PretrainedConfig ):
    model_type = '''xlm-roberta-xl'''
    def __init__( self , vocab_size=2_5_0_8_8_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_6 , num_attention_heads=3_2 , intermediate_size=1_0_2_4_0 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_4 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 103 | from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self , start , end , val , left=None , right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree :
    def __init__( self , collection : Sequence , function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        """simple docstring"""
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        """simple docstring"""
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        """simple docstring"""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
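        # Cross-check sketch (not part of the original demo): any range query must equal
        # a brute-force fold of `fn` over the same slice of the (updated) backing data.
        from functools import reduce
        updated = [2, 5, 5, 3, 4]  # the list after arr.update(1, 5) above
        assert arr.query_range(1, 3) == reduce(fn, updated[1:4])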
| 18 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests (OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self ,seed=0 ):
        generator = np.random.RandomState(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_stable_diffusion_prompt_embeds( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''' )]
        text_inputs = pipe.tokenizer(
            prompt ,padding='''max_length''' ,max_length=pipe.tokenizer.model_max_length ,truncation=True ,return_tensors='''np''' ,)
        text_inputs = text_inputs['''input_ids''']
        prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0]
        inputs['''prompt_embeds'''] = prompt_embeds
        # forward
        output = pipe(**inputs )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
    def test_stable_diffusion_negative_prompt_embeds( self ):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = pipe(**inputs )
        image_slice_a = output.images[0, -3:, -3:, -1]
        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop('''prompt''' )]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p ,padding='''max_length''' ,max_length=pipe.tokenizer.model_max_length ,truncation=True ,return_tensors='''np''' ,)
            text_inputs = text_inputs['''input_ids''']
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32 ) )[0] )
        inputs['''prompt_embeds'''] , inputs['''negative_prompt_embeds'''] = embeds
        # forward
        output = pipe(**inputs )
        image_slice_b = output.images[0, -3:, -3:, -1]
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests (unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=6.0 ,num_inference_steps=1_0 ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_ddim( self ):
__lowercase = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,subfolder='''scheduler''' ,revision='''onnx''' )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''open neural network exchange'''
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=lowercase__ ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_k_lms( self ):
__lowercase = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,subfolder='''scheduler''' ,revision='''onnx''' )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,scheduler=lowercase__ ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''open neural network exchange'''
__lowercase = np.random.RandomState(0 )
__lowercase = sd_pipe([prompt] ,guidance_scale=7.5 ,num_inference_steps=1_0 ,generator=lowercase__ ,output_type='''np''' )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_intermediate_state( self ):
__lowercase = 0
def test_callback_fn(lowercase__ : int ,lowercase__ : int ,lowercase__ : np.ndarray ) -> None:
__lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
__lowercase = latents[0, -3:, -3:, -1]
__lowercase = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
__lowercase = False
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = '''Andromeda galaxy in a bottle'''
__lowercase = np.random.RandomState(0 )
pipe(
prompt=lowercase__ ,num_inference_steps=5 ,guidance_scale=7.5 ,generator=lowercase__ ,callback=lowercase__ ,callback_steps=1 ,)
assert test_callback_fn.has_been_called
assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker( self ):
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' ,revision='''onnx''' ,safety_checker=lowercase__ ,feature_extractor=lowercase__ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
assert isinstance(lowercase__ ,lowercase__ )
assert pipe.safety_checker is None
__lowercase = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase__ )
__lowercase = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe('''example prompt''' ,num_inference_steps=2 ).images[0]
assert image is not None
def euclidean_gcd ( a : int , b : int ):
    """simple docstring"""
    while b:
        a , b = b , a % b
    return a
def euclidean_gcd_recursive ( a : int , b : int ):
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
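    # Property check (illustrative, not part of the original): both variants agree with math.gcd.
    import math
    import random
    for _ in range(100):
        x , y = random.randint(0 , 1000) , random.randint(0 , 1000)
        assert euclidean_gcd(x , y) == euclidean_gcd_recursive(x , y) == math.gcd(x , y)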
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_jukebox'''] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 0 |
"""simple docstring"""
from collections import namedtuple
__UpperCamelCase : Optional[int] = namedtuple('''from_to''', '''from_ to''')
__UpperCamelCase : List[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_0_1, 1_0_0_0),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_0_4_5_4, 2_6_4.1_7_2),
'''cubicyard''': from_to(0.7_6_4_5_5, 1.3_0_7_9_5),
'''cubicfoot''': from_to(0.0_2_8, 3_5.3_1_4_7),
'''cup''': from_to(0.0_0_0_2_3_6_5_8_8, 4_2_2_6.7_5),
}
def volume_conversion ( value : float , from_type : str , to_type : str ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
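    # Example (illustrative): 5 litres expressed in gallons, via the cubic-metre pivot:
    # 5 * 0.001 (litre -> m^3) * 264.172 (m^3 -> gallon) ~ 1.32086
    # >>> volume_conversion(5, "litre", "gallon")
    # 1.32086...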
| 106 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate ( model_type : str , generator_name_or_path : str , question_encoder_name_or_path : str , dest_dir : Path , config_name_or_path : str = None , generator_tokenizer_name_or_path : str = None , question_encoder_tokenizer_name_or_path : str = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
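    # Example invocation (hypothetical script name, paths and model choices, shown for
    # illustration only):
    # python consolidate_rag_checkpoint.py \
    #     --model_type rag_sequence \
    #     --dest ./rag-consolidated \
    #     --generator_name_or_path facebook/bart-large-cnn \
    #     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base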
| 18 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters ( state_dict ):
    '''simple docstring'''
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict , codebook_state_dict ):
    '''simple docstring'''
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head" )
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head" )
        key = key.replace("heads.cmd.itm_head.cls", "itm_head" )
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler" )
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale" )
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head" )
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head" )
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection" )
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection" )
        key = key.replace("image_encoder.module", "flava.image_model" )
        key = key.replace("text_encoder.module", "flava.text_model" )
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token" )
        key = key.replace("mm_encoder.module", "flava.multimodal_model" )
        key = key.replace("text_projection", "flava.text_projection" )
        key = key.replace("image_projection", "flava.image_projection" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[F"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint ( checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    '''simple docstring'''
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location="cpu" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location="cpu" )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
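    # Example invocation (hypothetical script name and local paths, for illustration only):
    # python convert_flava_original_pytorch_to_hf.py \
    #     --checkpoint_path ./flava_full.pt \
    #     --codebook_path ./flava_codebook.pt \
    #     --pytorch_dump_folder_path ./flava-hf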
| 107 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
    def test_flatten_dict( self ):
        """simple docstring"""
        input_dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
        expected_dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
        self.assertEqual(flatten_dict(input_dict ) , expected_dict )
    def test_transpose_numpy( self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        self.assertTrue(np.allclose(transpose(x ) , x.transpose() ) )
        x = np.random.randn(3,4,5 )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
    @require_torch
    def test_transpose_torch( self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_tf
    def test_transpose_tf( self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ) , transpose(t ).numpy() ) )
        x = np.random.randn(3,4,5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , transpose(t , axes=(1, 2, 0) ).numpy() ) )
    @require_flax
    def test_transpose_flax( self ):
        """simple docstring"""
        x = np.random.randn(3,4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ) , np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3,4,5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x , axes=(1, 2, 0) ) , np.asarray(transpose(t , axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
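As a reading aid for the first test above, here is a minimal sketch of the dotted-key flattening behaviour it asserts; the helper name and the `sep` default are assumptions for illustration, not the transformers implementation.
def flatten_dict_sketch(nested, parent_key="", sep="."):
    # Join nested keys with "." into a single-level dict, as the test expects.
    flat = {}
    for key, value in nested.items():
        new_key = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            flat.update(flatten_dict_sketch(value, new_key, sep=sep))
        else:
            flat[new_key] = value
    return flat

assert flatten_dict_sketch({"a": {"b": 1}}) == {"a.b": 1}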
| 18 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
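A self-contained, hypothetical sketch of the lazy-import pattern the block above relies on: attribute access triggers the real submodule import, so importing the package itself stays cheap. All names below are invented for illustration and are not the real `_LazyModule` implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    """Illustrative stand-in for a lazy module (an assumption, not the library code)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Reverse map: exported attribute -> submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(attr)
        # The import happens only now, on first access to the attribute.
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, attr)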
| 108 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return the byte -> printable-unicode table used by byte-level BPE."""
    bs = (
        list(range(ord("!" ), ord("~" ) + 1 )) + list(range(ord("¡" ), ord("¬" ) + 1 )) + list(range(ord("®" ), ord("ÿ" ) + 1 ))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs, cs ) )


def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
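To make the sentence-pair layout produced by the special-token methods above concrete, a tiny sketch with placeholder ids (the ids are assumptions; only the `<s> A </s></s> B </s>` shape matters):
cls_id, sep_id = 0, 2                      # placeholder ids, not real vocab entries
ids_a, ids_b = [10, 11], [20]
pair = [cls_id] + ids_a + [sep_id] + [sep_id] + ids_b + [sep_id]
assert pair == [0, 10, 11, 2, 2, 20, 2]
token_type_ids = [0] * len(pair)           # this tokenizer never uses segment id 1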
| 18 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A: List[Any] = logging.get_logger(__name__)
A: Any = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Tuple = 'sew'
def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=768 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3072 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE="group" , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="mean" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=256 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> int:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = hidden_size
UpperCAmelCase : int = feat_extract_norm
UpperCAmelCase : Optional[Any] = feat_extract_activation
UpperCAmelCase : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = conv_bias
UpperCAmelCase : int = num_conv_pos_embeddings
UpperCAmelCase : Tuple = num_conv_pos_embedding_groups
UpperCAmelCase : List[str] = len(self.conv_dim )
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : str = intermediate_size
UpperCAmelCase : Dict = squeeze_factor
UpperCAmelCase : Optional[int] = hidden_act
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : int = hidden_dropout
UpperCAmelCase : Tuple = attention_dropout
UpperCAmelCase : Dict = activation_dropout
UpperCAmelCase : int = feat_proj_dropout
UpperCAmelCase : List[Any] = final_dropout
UpperCAmelCase : str = layerdrop
UpperCAmelCase : List[Any] = layer_norm_eps
UpperCAmelCase : Any = initializer_range
UpperCAmelCase : List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase : str = apply_spec_augment
UpperCAmelCase : Union[str, Any] = mask_time_prob
UpperCAmelCase : Optional[Any] = mask_time_length
UpperCAmelCase : Optional[int] = mask_time_min_masks
UpperCAmelCase : int = mask_feature_prob
UpperCAmelCase : Dict = mask_feature_length
UpperCAmelCase : Tuple = mask_feature_min_masks
# ctc loss
UpperCAmelCase : Optional[int] = ctc_loss_reduction
UpperCAmelCase : int = ctc_zero_infinity
# sequence classification
UpperCAmelCase : Optional[int] = use_weighted_layer_sum
UpperCAmelCase : Tuple = classifier_proj_size
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
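The property above (named `inputs_to_logits_ratio` in the upstream config; the name is obscured here) just multiplies the conv strides, so with the default stride tuple the feature extractor consumes 320 waveform samples per output frame. A hedged restatement of that arithmetic:
import functools
import operator

default_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)   # conv_stride default above
ratio = functools.reduce(operator.mul, default_stride, 1)
assert ratio == 320   # 5 * 2**6 raw-audio samples per encoder frame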
| 109 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
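A note on the `/ 2` in the attention-shape assertions above: ConvBERT replaces half of the self-attention heads with a convolution branch (the config default is `head_ratio=2`, assumed here), so the attention tensors expose only `num_attention_heads / head_ratio` heads. A small restatement under that assumption:
num_attention_heads, head_ratio = 4, 2     # tester value above; config default
heads_in_attention_output = num_attention_heads // head_ratio
assert heads_in_attention_output == 2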
| 18 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCAmelCase = logging.getLogger(__name__)
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = '''token-classification'''
def __init__( self: int , UpperCamelCase_: Optional[Any] ) -> Dict:
"""simple docstring"""
if type(UpperCamelCase_ ) == dict:
lowercase__ = Namespace(**UpperCamelCase_ )
lowercase__ = import_module('''tasks''' )
try:
lowercase__ = getattr(UpperCamelCase_ , hparams.task_type )
lowercase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
lowercase__ = self.token_classification_task.get_labels(hparams.labels )
lowercase__ = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase_ , len(self.labels ) , self.mode )
def lowerCamelCase_ ( self: Tuple , **UpperCamelCase_: Optional[int] ) -> str:
"""simple docstring"""
return self.model(**UpperCamelCase_ )
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: int ) -> int:
"""simple docstring"""
lowercase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**UpperCamelCase_ )
lowercase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.hparams
for mode in ["train", "dev", "test"]:
lowercase__ = self._feature_file(UpperCamelCase_ )
if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowercase__ = torch.load(UpperCamelCase_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
lowercase__ = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase_ )
lowercase__ = self.token_classification_task.convert_examples_to_features(
UpperCamelCase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , UpperCamelCase_ )
torch.save(UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: int , UpperCamelCase_: bool = False ) -> DataLoader:
"""simple docstring"""
lowercase__ = self._feature_file(UpperCamelCase_ )
logger.info('''Loading features from cached file %s''' , UpperCamelCase_ )
lowercase__ = torch.load(UpperCamelCase_ )
lowercase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowercase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
lowercase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
lowercase__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
lowercase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ )
def lowerCamelCase_ ( self: Any , UpperCamelCase_: int , UpperCamelCase_: List[Any] ) -> Union[str, Any]:
"""simple docstring"""
"""Compute validation""" ""
lowercase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
lowercase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don"t use token_type_ids
lowercase__ = self(**UpperCamelCase_ )
lowercase__ , lowercase__ = outputs[:2]
lowercase__ = logits.detach().cpu().numpy()
lowercase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: List[str] ) -> int:
"""simple docstring"""
lowercase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
lowercase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
lowercase__ = np.argmax(UpperCamelCase_ , axis=2 )
lowercase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
lowercase__ = dict(enumerate(self.labels ) )
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
lowercase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
lowercase__ = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(UpperCamelCase_ , UpperCamelCase_ ),
'''precision''': precision_score(UpperCamelCase_ , UpperCamelCase_ ),
'''recall''': recall_score(UpperCamelCase_ , UpperCamelCase_ ),
            '''f1''': f1_score(UpperCamelCase_ , UpperCamelCase_ ),
}
lowercase__ = dict(results.items() )
lowercase__ = results
return ret, preds_list, out_label_list
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(UpperCamelCase_ )
lowercase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Dict ) -> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ = self._eval_end(UpperCamelCase_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
lowercase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Optional[Any] , UpperCamelCase_: str ) -> Optional[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=UpperCamelCase_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=UpperCamelCase_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=UpperCamelCase_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCAmelCase = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = NERTransformer(args)
lowerCAmelCase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
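Minimal sketch (with invented labels) of the alignment loop in `_eval_end` above: positions whose gold id equals the `CrossEntropyLoss` ignore index are sub-word continuations and are dropped from both the gold and predicted tag sequences before seqeval scoring.
import numpy as np

ignore_index = -100                     # CrossEntropyLoss().ignore_index default
label_map = {0: "O", 1: "B-PER"}        # invented labels for the sketch
gold = np.array([[1, ignore_index, 0]])
pred = np.array([[1, 1, 0]])
kept_gold = [[label_map[int(g)] for g in row if g != ignore_index] for row in gold]
kept_pred = [
    [label_map[int(p)] for g, p in zip(grow, prow) if g != ignore_index]
    for grow, prow in zip(gold, pred)
]
assert kept_gold == [["B-PER", "O"]]
assert kept_pred == [["B-PER", "O"]]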
| 110 | def binary_recursive(decimal: int) -> str:
    """Convert a non-negative integer to its binary digit string, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return a signed, "0b"-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
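Quick sanity checks for the converter above, using the function names as repaired (`main` is an assumed name for the wrapper):
assert binary_recursive(10) == "1010"
assert main("-10") == "-0b1010"
assert main(" 0 ") == "0b0"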
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ) -> int:
lowercase__ : List[Any] = parent
lowercase__ : Any = 13
lowercase__ : List[str] = 7
lowercase__ : Dict = True
lowercase__ : Optional[Any] = True
lowercase__ : Tuple = True
lowercase__ : List[str] = True
lowercase__ : List[str] = 99
lowercase__ : Tuple = 384
lowercase__ : Optional[Any] = 2
lowercase__ : Any = 4
lowercase__ : str = 37
lowercase__ : Optional[Any] = "gelu"
lowercase__ : List[Any] = 0.1
lowercase__ : Union[str, Any] = 0.1
lowercase__ : Dict = 512
lowercase__ : int = 16
lowercase__ : Optional[int] = 2
lowercase__ : Any = 0.0_2
lowercase__ : str = 3
lowercase__ : int = 4
lowercase__ : Dict = 128
lowercase__ : Any = 2
lowercase__ : Tuple = 9
lowercase__ : List[Any] = 1
lowercase__ : Any = None
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : List[str] = None
if self.use_token_type_ids:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Dict = None
lowercase__ : Dict = None
lowercase__ : str = None
if self.use_labels:
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Any = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : Optional[int] = TFConvBertModel(config=_A )
lowercase__ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowercase__ : str = [input_ids, input_mask]
lowercase__ : List[str] = model(_A )
lowercase__ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
lowercase__ : Dict = TFConvBertForMaskedLM(config=_A )
lowercase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
lowercase__ : List[Any] = self.num_labels
lowercase__ : Any = TFConvBertForSequenceClassification(config=_A )
lowercase__ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple:
lowercase__ : Optional[Any] = self.num_choices
lowercase__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
lowercase__ : Any = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Any = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowercase__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
lowercase__ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowercase__ : int = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : List[str] = self.num_labels
lowercase__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
lowercase__ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : str = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
lowercase__ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
lowercase__ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
lowercase__ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : str = TFConvBertModelTester(self )
lowercase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def _lowerCAmelCase( self ) -> str:
self.config_tester.run_common_tests()
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def _lowerCAmelCase( self ) -> int:
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def _lowerCAmelCase( self ) -> str:
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = True
lowercase__ : Any = True
if hasattr(_A , '''use_cache''' ):
lowercase__ : List[Any] = True
lowercase__ : int = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase__ : Optional[Any] = getattr(self.model_tester , '''key_length''' , _A )
for model_class in self.all_model_classes:
lowercase__ : List[str] = self._prepare_for_class(_A , _A )
lowercase__ : List[Any] = model_class(_A )
lowercase__ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
lowercase__ : Optional[Any] = os.path.join(_A , '''saved_model''' , '''1''' )
lowercase__ : Tuple = tf.keras.models.load_model(_A )
lowercase__ : str = model(_A )
if self.is_encoder_decoder:
lowercase__ : Optional[Any] = outputs["encoder_hidden_states"]
lowercase__ : str = outputs["encoder_attentions"]
else:
lowercase__ : Any = outputs["hidden_states"]
lowercase__ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ) , _A )
lowercase__ : Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowerCAmelCase( self ) -> List[str]:
lowercase__ : str = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(_A )
def _lowerCAmelCase( self ) -> str:
lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = True
lowercase__ : List[str] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
lowercase__ : Any = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
lowercase__ : Optional[int] = getattr(self.model_tester , '''key_length''' , _A )
lowercase__ : int = getattr(self.model_tester , '''key_length''' , _A )
def check_decoder_attentions_output(__lowerCAmelCase ):
lowercase__ : int = len(_A )
self.assertEqual(out_len % 2 , 0 )
lowercase__ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__lowerCAmelCase ):
lowercase__ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = True
lowercase__ : Optional[Any] = False
lowercase__ : Tuple = model_class(_A )
lowercase__ : Any = model(self._prepare_for_class(_A , _A ) )
lowercase__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
lowercase__ : Optional[Any] = model_class(_A )
lowercase__ : int = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowercase__ : str = True
lowercase__ : int = model_class(_A )
lowercase__ : List[str] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
lowercase__ : str = True
lowercase__ : int = True
lowercase__ : Dict = model_class(_A )
lowercase__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[str] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
lowercase__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__ : Tuple = model(_A )[0]
lowercase__ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _A )
        lowercase__ : Union[str, Any] = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowercase__ : List[str] = re.compile(R"""\b(a|an|the)\b""", re.UNICODE)
lowercase__ : Dict = None
def UpperCamelCase_ ( ) -> Any:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=lowerCAmelCase__ , default=1.0 , help='Predict \"\" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=lowerCAmelCase__ , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def UpperCamelCase_ ( lowerCAmelCase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Optional[Any] = bool(qa['answers']['text'] )
return qid_to_has_ans
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> str:
"""simple docstring"""
def remove_articles(lowerCAmelCase__ : List[Any] ):
return ARTICLES_REGEX.sub(' ' , lowerCAmelCase__ )
def white_space_fix(lowerCAmelCase__ : Optional[Any] ):
return " ".join(text.split() )
def remove_punc(lowerCAmelCase__ : List[str] ):
lowerCAmelCase_ : List[str] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowerCAmelCase__ : List[Any] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase__ ) ) ) )
def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
if not s:
return []
return normalize_answer(lowerCAmelCase__ ).split()
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int ) -> List[str]:
"""simple docstring"""
return int(normalize_answer(lowerCAmelCase__ ) == normalize_answer(lowerCAmelCase__ ) )
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : Tuple = get_tokens(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_tokens(lowerCAmelCase__ )
lowerCAmelCase_ : str = collections.Counter(lowerCAmelCase__ ) & collections.Counter(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = sum(common.values() )
if len(lowerCAmelCase__ ) == 0 or len(lowerCAmelCase__ ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowerCAmelCase_ : str = 1.0 * num_same / len(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = 1.0 * num_same / len(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = (2 * precision * recall) / (precision + recall)
return fa
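# A quick worked example of the F1 computation above (added for illustration,
# not part of the original script): normalization drops the article "the", so
# gold "the cat sat" tokenizes to ["cat", "sat"] and prediction "cat sat down"
# to ["cat", "sat", "down"].  Then num_same = 2, precision = 2/3, recall = 2/2 = 1,
# and F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.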
def UpperCamelCase_ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Tuple = qa["id"]
lowerCAmelCase_ : Dict = [t for t in qa["answers"]["text"] if normalize_answer(lowerCAmelCase__ )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
lowerCAmelCase_ : str = preds[qid]
# Take max over all gold answers
lowerCAmelCase_ : str = max(compute_exact(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
lowerCAmelCase_ : int = max(compute_fa(lowerCAmelCase__ , lowerCAmelCase__ ) for a in gold_answers )
return exact_scores, fa_scores
def UpperCamelCase_ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = {}
for qid, s in scores.items():
lowerCAmelCase_ : Optional[int] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : Optional[int] = float(not qid_to_has_ans[qid] )
else:
lowerCAmelCase_ : Optional[int] = s
return new_scores
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any]=None ) -> List[str]:
"""simple docstring"""
if not qid_list:
lowerCAmelCase_ : Optional[Any] = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores.values() ) / total),
('f1', 100.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
lowerCAmelCase_ : Any = len(lowerCAmelCase__ )
return collections.OrderedDict(
[
('exact', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
def UpperCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
plt.step(lowerCAmelCase__ , lowerCAmelCase__ , color='b' , alpha=0.2 , where='post' )
plt.fill_between(lowerCAmelCase__ , lowerCAmelCase__ , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowerCAmelCase__ )
plt.savefig(lowerCAmelCase__ )
plt.clf()
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Union[str, Any]=None ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : Optional[Any] = [1.0]
lowerCAmelCase_ : List[str] = [0.0]
lowerCAmelCase_ : Optional[int] = 0.0
for i, qid in enumerate(lowerCAmelCase__ ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : Tuple = true_pos / float(i + 1 )
lowerCAmelCase_ : List[str] = true_pos / float(lowerCAmelCase__ )
if i == len(lowerCAmelCase__ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowerCAmelCase__ )
recalls.append(lowerCAmelCase__ )
if out_image:
plot_pr_curve(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return {"ap": 100.0 * avg_prec}
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
if out_image_dir and not os.path.exists(lowerCAmelCase__ ):
os.makedirs(lowerCAmelCase__ )
lowerCAmelCase_ : int = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowerCAmelCase_ : List[Any] = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
lowerCAmelCase_ : int = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
lowerCAmelCase_ : Dict = {k: float(lowerCAmelCase__ ) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : Any = make_precision_recall_eval(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , out_image=os.path.join(lowerCAmelCase__ , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , 'pr_exact' )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , 'pr_f1' )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , 'pr_oracle' )
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
if not qid_list:
return
lowerCAmelCase_ : int = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : List[Any] = np.ones_like(lowerCAmelCase__ ) / float(len(lowerCAmelCase__ ) )
plt.hist(lowerCAmelCase__ , weights=lowerCAmelCase__ , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(lowerCAmelCase__ , f"na_prob_hist_{name}.png" ) )
plt.clf()
def UpperCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowerCAmelCase_ : List[str] = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : List[str] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : na_probs[k] )
for i, qid in enumerate(lowerCAmelCase__ ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Tuple = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : int = -1
else:
lowerCAmelCase_ : str = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[int] = cur_score
lowerCAmelCase_ : str = na_probs[qid]
return 100.0 * best_score / len(lowerCAmelCase__ ), best_thresh
def UpperCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ) -> str:
"""simple docstring"""
lowerCAmelCase_ : Dict = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = find_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = best_exact
lowerCAmelCase_ : Dict = exact_thresh
lowerCAmelCase_ : List[str] = best_fa
lowerCAmelCase_ : int = fa_thresh
def UpperCamelCase_ ( ) -> str:
"""simple docstring"""
with open(OPTS.data_file ) as f:
lowerCAmelCase_ : int = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = dataset_json["data"]
with open(OPTS.pred_file ) as f:
lowerCAmelCase_ : int = json.load(lowerCAmelCase__ )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowerCAmelCase_ : Union[str, Any] = json.load(lowerCAmelCase__ )
else:
lowerCAmelCase_ : str = {k: 0.0 for k in preds}
lowerCAmelCase_ : str = make_qid_to_has_ans(lowerCAmelCase__ ) # maps qid to True/False
lowerCAmelCase_ : int = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ : Optional[int] = get_raw_scores(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
lowerCAmelCase_ : Any = apply_no_ans_threshold(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.na_prob_thresh )
lowerCAmelCase_ : List[str] = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if has_ans_qids:
lowerCAmelCase_ : int = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , 'HasAns' )
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(lowerCAmelCase__ , lowerCAmelCase__ , qid_list=lowerCAmelCase__ )
merge_eval(lowerCAmelCase__ , lowerCAmelCase__ , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(lowerCAmelCase__ , lowerCAmelCase__ , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
else:
print(json.dumps(lowerCAmelCase__ , indent=2 ) )
if __name__ == "__main__":
lowercase__ : List[Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
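# Illustrative invocation of the evaluation script above (file names are placeholders):
#   python evaluate_squad_v2.py data.json pred.json \
#       --na-prob-file na_prob.json --na-prob-thresh 0.5 \
#       --out-file eval.json --out-image-dir out_images --verbose
# The resulting JSON reports "exact", "f1" and "total" overall, plus "HasAns"/"NoAns"
# breakdowns and, when no-answer probabilities are supplied, the best thresholds found.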
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
                # Re-key each expert slice individually; the replacement suffix is
                # reconstructed here (the original nested f-string was lost in extraction).
                new_key = key.replace("expert/" , f"expert_{idx}/" )
                s_dict[new_key] = expert_weihts[idx]
                print(f'{key} -> {new_key}' )
s_dict.pop(lowerCAmelCase )
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
SCREAMING_SNAKE_CASE_ : int = checkpoints.load_tax_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
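# Illustrative invocation (all paths are placeholders; provide either --gin_file
# or --config_name, since the converter falls back to from_pretrained when no
# gin file is given):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/config.gin \
#       --pytorch_dump_folder_path ./switch-converted \
#       --num_experts 8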
'''simple docstring'''
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
    # Two absences in total, or a third consecutive late day, disqualify the string.
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
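# Worked example for the recursion above: over a 4-day period there are 3**4 = 81
# attendance strings, of which solution(4) == 43 earn a prize -- 13 with no absent
# day (the 16 on-time/late strings minus LLLL, LLLO and OLLL) plus 30 with exactly
# one absent day.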
from math import factorial, radians
# NOTE: identifiers below are reconstructed from context; the original names
# were lost to obfuscation.
def maclaurin_sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    # Wrap the angle into [0, 360) so large inputs stay well-behaved.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
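# Sanity checks for the series above: maclaurin_sin(30) -> 0.5 and
# maclaurin_sin(90) -> 1.0 at the default 18 terms and 10 rounded decimal
# places, matching math.sin after the degree-to-radian conversion.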
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__snake_case = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class __snake_case ( A__ ):
__lowerCamelCase : str = """ernie_m"""
__lowerCamelCase : Tuple = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""}
def __init__( self , snake_case__ = 25_0002 , snake_case__ = 768 , snake_case__ = 12 , snake_case__ = 12 , snake_case__ = 3072 , snake_case__ = "gelu" , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 514 , snake_case__ = 0.02 , snake_case__ = 1 , snake_case__ = 1e-05 , snake_case__=None , snake_case__=False , snake_case__=0.0 , **snake_case__ , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=_A , **_A )
UpperCAmelCase : Dict =vocab_size
UpperCAmelCase : Union[str, Any] =hidden_size
UpperCAmelCase : Any =num_hidden_layers
UpperCAmelCase : List[Any] =num_attention_heads
UpperCAmelCase : Tuple =intermediate_size
UpperCAmelCase : List[Any] =hidden_act
UpperCAmelCase : Optional[Any] =hidden_dropout_prob
UpperCAmelCase : int =attention_probs_dropout_prob
UpperCAmelCase : Tuple =max_position_embeddings
UpperCAmelCase : Union[str, Any] =initializer_range
UpperCAmelCase : List[str] =layer_norm_eps
UpperCAmelCase : Union[str, Any] =classifier_dropout
UpperCAmelCase : str =is_decoder
UpperCAmelCase : str =act_dropout
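# Minimal usage sketch (in the upstream transformers library this class is
# registered as ErnieMConfig; keyword names follow the __init__ signature above):
# config = ErnieMConfig(hidden_size=512, num_hidden_layers=6)
# config.model_type  # "ernie_m"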
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
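# Example: factorial(5) == 120.  Because of @lru_cache, a later call such as
# factorial(4) is answered from the memoized intermediate results, and negative
# inputs raise ValueError("Number should not be negative.").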
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:  # name reconstructed; the original was lost to obfuscation
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
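# Worked example: a 10 mH inductor with a 100 uF capacitor gives
# resonant_frequency(0.01, 0.0001) == ("Resonant frequency", 159.15494309189535),
# i.e. f = 1 / (2 * pi * sqrt(L * C)).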
from collections import defaultdict
def dfs(start: int) -> int:
    """simple docstring"""
    ret = 1
    # Mark the current node as visited before recursing into its children.
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    # An even-sized subtree below this node means its parent edge can be cut.
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    """simple docstring"""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
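# For the sample graph above this prints 2: cutting the edges (1, 3) and (1, 6)
# leaves even components of sizes 2, 4 and 4.  The root's own even subtree of
# size 10 also lands in `cuts`, hence the final `- 1`.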
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''CLIPFeatureExtractor''']
lowercase_ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
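# Typical invocations of the CLI assembled above (subcommands correspond to the
# parsers registered in main; the script name is a placeholder):
#   accelerate config
#   accelerate env
#   accelerate launch train.py
#   accelerate test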
"""simple docstring"""
import math
# NOTE: identifiers below are reconstructed from the call sites; logic unchanged.
def prime_sieve(n: int) -> list[int]:
    """simple docstring"""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """simple docstring"""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
    '''junnyu/roformer_chinese_small''': 1536,
    '''junnyu/roformer_chinese_base''': 1536,
    '''junnyu/roformer_chinese_char_small''': 512,
    '''junnyu/roformer_chinese_char_base''': 512,
    '''junnyu/roformer_small_discriminator''': 128,
    '''junnyu/roformer_small_generator''': 128,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
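# Usage sketch (in the upstream transformers library this class is registered as
# RoFormerTokenizerFast; the model id comes from the pretrained maps above):
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.tokenize("今天天气非常好。")  # jieba-based pre-tokenization of Chinese text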
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = KandinskyVaaControlnetImgaImgPipeline
__lowerCamelCase : Optional[int] = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowerCamelCase : Tuple = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowerCamelCase : List[str] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowerCamelCase : Union[str, Any] = False
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return 32
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim
@property
def _lowerCAmelCase ( self ):
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ):
return 100
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : List[str] = {
"in_channels": 8,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
A : Optional[int] = UNetaDConditionModel(**_A )
return model
@property
def _lowerCAmelCase ( self ):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ):
A : Tuple = self.dummy_unet
A : Dict = self.dummy_movq
A : List[str] = {
"num_train_timesteps": 1000,
"beta_schedule": "linear",
"beta_start": 0.0_0085,
"beta_end": 0.012,
"clip_sample": False,
"set_alpha_to_one": False,
"steps_offset": 0,
"prediction_type": "epsilon",
"thresholding": False,
}
A : Optional[Any] = DDIMScheduler(**_A )
A : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0 ):
A : str = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(_A ) ).to(_A )
A : str = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
A : Union[str, Any] = floats_tensor((1, 3, 64, 64), rng=random.Random(_A ) ).to(_A )
A : Tuple = image.cpu().permute(0, 2, 3, 1 )[0]
A : Any = Image.fromarray(np.uinta(_A ) ).convert("""RGB""" ).resize((256, 256) )
# create hint
A : str = floats_tensor((1, 3, 64, 64), rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith("""mps""" ):
A : Optional[int] = torch.manual_seed(_A )
else:
A : str = torch.Generator(device=_A ).manual_seed(_A )
A : Tuple = {
"image": init_image,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 10,
"guidance_scale": 7.0,
"strength": 0.2,
"output_type": "np",
}
return inputs
def _lowerCAmelCase ( self ):
A : Dict = "cpu"
A : Dict = self.get_dummy_components()
A : List[Any] = self.pipeline_class(**_A )
A : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
A : Optional[int] = pipe(**self.get_dummy_inputs(_A ) )
A : Tuple = output.images
A : int = pipe(
**self.get_dummy_inputs(_A ), return_dict=_A, )[0]
A : Tuple = image[0, -3:, -3:, -1]
A : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A : Dict = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ):
A : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
A : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A : int = init_image.resize((512, 512) )
A : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A : Tuple = torch.from_numpy(np.array(_A ) ).float() / 255.0
A : str = hint.permute(2, 0, 1 ).unsqueeze(0 )
A : int = "A robot, 4k photo"
A : List[Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.floataa )
pipe_prior.to(_A )
A : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.floataa )
A : Union[str, Any] = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
A : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
A : int = pipe_prior(
_A, image=_A, strength=0.85, generator=_A, negative_prompt="""""", ).to_tuple()
A : str = pipeline(
image=_A, image_embeds=_A, negative_image_embeds=_A, hint=_A, generator=_A, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="""np""", )
A : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_A, _A )
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_sentence_classification(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 18 | 0 |
'''simple docstring'''


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
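
# Worked example (added for illustration): for n = 10 the sum of squares is 385
# and the square of the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640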
if __name__ == "__main__":
print(f'{solution() = }')
| 104 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
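
# Minimal usage sketch (added; not part of the original suite): the reader
# turns a plain text file into a single-column "text" Dataset.
#
#   from pathlib import Path
#   tmp = Path("/tmp/text_reader_demo")
#   tmp.mkdir(exist_ok=True)
#   (tmp / "sample.txt").write_text("foo\nbar\nfoobar\n")
#   ds = TextDatasetReader(str(tmp / "sample.txt"), cache_dir=str(tmp / "cache")).read()
#   print(ds.column_names)  # ["text"]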
| 18 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
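
# Usage sketch (added for illustration): the defaults above reproduce the
# ViViT-B 16x2 layout referenced in the archive map.
#
#   config = VivitConfig()
#   print(config.num_frames, config.tubelet_size)  # 32 [2, 16, 16]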
| 186 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
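
# Quick illustration (added): the helper flags any shape mismatch in a batch of
# tensors, which the scheduler sweep below relies on.
assert check_same_shape([torch.zeros(2, 3), torch.ones(2, 3)])
assert not check_same_shape([torch.zeros(2, 3), torch.zeros(3, 2)])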
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np"
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np"
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
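
# Distilled usage sketch (added; condenses the slow tests above — needs a GPU
# and model downloads, so shown as comments only):
#
#   pipe = StableDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#   ).to("cuda")
#   upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
#       "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
#   ).to("cuda")
#   latents = pipe("a photo of an astronaut", output_type="latent").images
#   image = upscaler(prompt="a photo of an astronaut", image=latents,
#                    num_inference_steps=20, guidance_scale=0).images[0]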
| 18 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Optional[int]:
assert isinstance(lowercase ,lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : Union[str, Any] = tmp_path / "cache"
snake_case : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case : List[str] = TextDatasetReader(lowercase ,cache_dir=lowercase ,keep_in_memory=lowercase ).read()
_check_text_dataset(lowercase ,lowercase )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
snake_case : Any = tmp_path / "cache"
snake_case : Optional[Any] = {"text": "string"}
snake_case : Any = features.copy() if features else default_expected_features
snake_case : List[str] = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case : Optional[Any] = TextDatasetReader(lowercase ,features=lowercase ,cache_dir=lowercase ).read()
_check_text_dataset(lowercase ,lowercase )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> List[Any]:
snake_case : Union[str, Any] = tmp_path / "cache"
snake_case : List[str] = {"text": "string"}
snake_case : List[str] = TextDatasetReader(lowercase ,cache_dir=lowercase ,split=lowercase ).read()
_check_text_dataset(lowercase ,lowercase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" ,[str, list] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
if issubclass(lowercase ,lowercase ):
snake_case : Union[str, Any] = text_path
elif issubclass(lowercase ,lowercase ):
snake_case : Union[str, Any] = [text_path]
snake_case : int = tmp_path / "cache"
snake_case : Optional[int] = {"text": "string"}
snake_case : List[str] = TextDatasetReader(lowercase ,cache_dir=lowercase ).read()
_check_text_dataset(lowercase ,lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=("train",) ) -> str:
assert isinstance(lowercase ,lowercase )
for split in splits:
snake_case : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" ,[False, True] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Dict:
snake_case : List[str] = tmp_path / "cache"
snake_case : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case : List[Any] = TextDatasetReader({"""train""": text_path} ,cache_dir=lowercase ,keep_in_memory=lowercase ).read()
_check_text_datasetdict(lowercase ,lowercase )
@pytest.mark.parametrize(
"""features""" ,[
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] ,)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Dict:
snake_case : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
snake_case : Tuple = {"text": "string"}
snake_case : Any = features.copy() if features else default_expected_features
snake_case : Dict = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case : str = TextDatasetReader({"""train""": text_path} ,features=lowercase ,cache_dir=lowercase ).read()
_check_text_datasetdict(lowercase ,lowercase )
@pytest.mark.parametrize("""split""" ,[None, NamedSplit("""train""" ), """train""", """test"""] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Dict:
if split:
snake_case : Optional[int] = {split: text_path}
else:
snake_case : List[Any] = "train"
snake_case : Tuple = {"train": text_path, "test": text_path}
snake_case : Any = tmp_path / "cache"
snake_case : List[str] = {"text": "string"}
snake_case : str = TextDatasetReader(lowercase ,cache_dir=lowercase ).read()
_check_text_datasetdict(lowercase ,lowercase ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 124 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__a: Optional[int] = logging.get_logger(__name__)
__a: Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCAmelCase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "perceiver"
def __init__( self , __lowerCAmelCase=256 , __lowerCAmelCase=1280 , __lowerCAmelCase=768 , __lowerCAmelCase=1 , __lowerCAmelCase=26 , __lowerCAmelCase=8 , __lowerCAmelCase=8 , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="kv" , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=True , __lowerCAmelCase=262 , __lowerCAmelCase=2048 , __lowerCAmelCase=56 , __lowerCAmelCase=[368, 496] , __lowerCAmelCase=16 , __lowerCAmelCase=1920 , __lowerCAmelCase=16 , __lowerCAmelCase=[1, 16, 224, 224] , **__lowerCAmelCase , ) -> str:
super().__init__(**_A )
lowercase__ : Dict = num_latents
lowercase__ : List[Any] = d_latents
lowercase__ : Union[str, Any] = d_model
lowercase__ : Optional[int] = num_blocks
lowercase__ : List[Any] = num_self_attends_per_block
lowercase__ : Tuple = num_self_attention_heads
lowercase__ : List[str] = num_cross_attention_heads
lowercase__ : List[Any] = qk_channels
lowercase__ : Any = v_channels
lowercase__ : Any = cross_attention_shape_for_attention
lowercase__ : List[str] = self_attention_widening_factor
lowercase__ : Any = cross_attention_widening_factor
lowercase__ : List[Any] = hidden_act
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Any = initializer_range
lowercase__ : List[Any] = layer_norm_eps
lowercase__ : Tuple = use_query_residual
# masked language modeling attributes
lowercase__ : List[str] = vocab_size
lowercase__ : Union[str, Any] = max_position_embeddings
# image classification attributes
lowercase__ : Dict = image_size
# flow attributes
lowercase__ : List[Any] = train_size
# multimodal autoencoding attributes
lowercase__ : str = num_frames
lowercase__ : Any = audio_samples_per_frame
lowercase__ : Tuple = samples_per_patch
lowercase__ : Optional[Any] = output_shape
class UpperCAmelCase ( A__ ):
'''simple docstring'''
@property
def _lowerCAmelCase( self ) -> Optional[int]:
if self.task == "multiple-choice":
lowercase__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
@property
def _lowerCAmelCase( self ) -> int:
return 1E-4
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = 3 , __lowerCAmelCase = 40 , __lowerCAmelCase = 40 , ) -> Tuple:
if isinstance(_A , _A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : Tuple = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase__ : Tuple = preprocessor.num_special_tokens_to_add(_A )
lowercase__ : Any = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
lowercase__ : Optional[Any] = [" ".join(['''a'''] ) * seq_length] * batch_size
lowercase__ : str = dict(preprocessor(_A , return_tensors=_A ) )
lowercase__ : List[str] = inputs.pop('''input_ids''' )
return inputs
elif isinstance(_A , _A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase__ : Any = compute_effective_axis_dimension(_A , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase__ : Optional[int] = self._generate_dummy_images(_A , _A , _A , _A )
lowercase__ : Any = dict(preprocessor(images=_A , return_tensors=_A ) )
lowercase__ : Any = inputs.pop('''pixel_values''' )
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
| 198 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str):
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
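
# Worked example (added): both implementations agree on the classic 3-4-5 triangle.
assert euclidean_distance([0, 0], [3, 4]) == 5.0
assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0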
if __name__ == "__main__":
    def benchmark() -> None:
"""simple docstring"""
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) )
benchmark()
| 224 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
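
# Worked example (added): a 10 mH inductor with a 100 nF capacitor resonates
# near 5.03 kHz, following f = 1 / (2 * pi * sqrt(L * C)).
assert round(resonant_frequency(10e-3, 100e-9)[1]) == 5033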
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class a_ :
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def snake_case_( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFFunnelModel(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = TFFunnelModel(config=_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = TFFunnelModel(config=_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = TFFunnelBaseModel(config=_A )
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFFunnelForPreTraining(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> List[str]:
_SCREAMING_SNAKE_CASE = TFFunnelForMaskedLM(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> Dict:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFFunnelForSequenceClassification(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFFunnelForMultipleChoice(config=_A )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFFunnelForTokenClassification(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A , ) -> List[Any]:
_SCREAMING_SNAKE_CASE = TFFunnelForQuestionAnswering(config=_A )
_SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
_SCREAMING_SNAKE_CASE = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class a_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': (TFFunnelBaseModel, TFFunnelModel),
'''fill-mask''': TFFunnelForMaskedLM,
'''question-answering''': TFFunnelForQuestionAnswering,
'''text-classification''': TFFunnelForSequenceClassification,
'''token-classification''': TFFunnelForTokenClassification,
'''zero-shot''': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = TFFunnelModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A )
def snake_case_( self ) -> List[str]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
@require_tf
class a_ ( A__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = TFFunnelModelTester(self , base=_A )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A )
def snake_case_( self ) -> List[Any]:
self.config_tester.run_common_tests()
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
| 58 |
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
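
# Quick check (added): binary search narrows the insertion point in O(log i),
# but the element shift keeps the overall worst case at O(n^2).
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]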
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 18 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
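# Performance note (illustrative sketch, not part of the original module):
# re-sorting the open list on every iteration costs O(n log n) per pop. A
# binary heap gives the same "smallest f_cost first" ordering at O(log n) per
# push/pop, relying on the Node.__lt__ defined above. Positions are
# deduplicated by coordinate because Node does not define __eq__.
def heap_search(searcher: GreedyBestFirst) -> Path | None:
    import heapq

    open_heap = list(searcher.open_nodes)
    heapq.heapify(open_heap)
    visited: set[tuple[int, int]] = set()
    while open_heap:
        current_node = heapq.heappop(open_heap)  # smallest f_cost first
        if current_node.pos == searcher.target.pos:
            return searcher.retrace_path(current_node)
        if current_node.pos in visited:
            continue
        visited.add(current_node.pos)
        for child_node in searcher.get_successors(current_node):
            if child_node.pos not in visited:
                heapq.heappush(open_heap, child_node)
    return None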
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
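# Cross-check sketch (demo only, not part of the original module): every
# range query answered by the tree must equal a brute-force reduce over the
# same slice of the source list.
if __name__ == "__main__":
    import operator
    from functools import reduce

    data = [2, 1, 5, 3, 4]
    tree = SegmentTree(data, operator.add)
    for i in range(len(data)):
        for j in range(i, len(data)):
            assert tree.query_range(i, j) == reduce(operator.add, data[i : j + 1])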
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
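# Usage sketch (illustrative; assumes an installed `transformers` exposing
# this class under its public import path rather than the relative imports
# above):
#
#     from transformers import XLMProphetNetConfig
#
#     cfg = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
#     assert cfg.num_hidden_layers == 12   # property: encoder depth + decoder depth
#     cfg.num_hidden_layers = 3            # raises NotImplementedError by design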
def euclidean_gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b, iteratively."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return the greatest common divisor of a and b, recursively."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
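# Cross-check sketch (demo only): both implementations must agree with the
# standard library's math.gcd on small positive inputs.
if __name__ == "__main__":
    import math

    for a in range(1, 30):
        for b in range(1, 30):
            assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)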
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
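# Note (added for context): the fast test above runs a randomly initialized
# 32x32 UNet, so it completes in seconds on CPU, while the @slow integration
# test downloads google/ddpm-cifar10-32 and only runs when slow tests are
# enabled (RUN_SLOW=1, the usual diffusers test convention).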
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __snake_case ( A__):
snake_case__ : Any = "decision_transformer"
snake_case__ : Optional[int] = ["past_key_values"]
snake_case__ : Optional[int] = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , __lowerCAmelCase : Optional[int]=1_7 , __lowerCAmelCase : List[str]=4 , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[Any]=4_0_9_6 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1 , __lowerCAmelCase : str=1_0_2_4 , __lowerCAmelCase : int=3 , __lowerCAmelCase : Optional[Any]=1 , __lowerCAmelCase : int=None , __lowerCAmelCase : List[str]="relu" , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=1E-5 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=True , __lowerCAmelCase : List[Any]=5_0_2_5_6 , __lowerCAmelCase : Any=5_0_2_5_6 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : str=False , **__lowerCAmelCase : str , ):
"""simple docstring"""
_lowerCamelCase : List[str] = state_dim
_lowerCamelCase : Union[str, Any] = act_dim
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Optional[int] = max_ep_len
_lowerCamelCase : Optional[int] = action_tanh
_lowerCamelCase : Any = vocab_size
_lowerCamelCase : List[Any] = n_positions
_lowerCamelCase : Tuple = n_layer
_lowerCamelCase : Union[str, Any] = n_head
_lowerCamelCase : str = n_inner
_lowerCamelCase : List[Any] = activation_function
_lowerCamelCase : int = resid_pdrop
_lowerCamelCase : Dict = embd_pdrop
_lowerCamelCase : Union[str, Any] = attn_pdrop
_lowerCamelCase : Union[str, Any] = layer_norm_epsilon
_lowerCamelCase : List[Any] = initializer_range
_lowerCamelCase : Optional[Any] = scale_attn_weights
_lowerCamelCase : Optional[int] = use_cache
_lowerCamelCase : Optional[int] = scale_attn_by_inverse_layer_idx
_lowerCamelCase : int = reorder_and_upcast_attn
_lowerCamelCase : Any = bos_token_id
_lowerCamelCase : Tuple = eos_token_id
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A )
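# Usage sketch (illustrative; assumes an installed `transformers` exposing
# this class under its public import path):
#
#     from transformers import DecisionTransformerConfig
#
#     cfg = DecisionTransformerConfig(state_dim=11, act_dim=3)  # e.g. Hopper dims
#     assert cfg.num_hidden_layers == cfg.n_layer  # mapped via attribute_map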
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
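# Example invocation (model identifiers are illustrative, not prescriptive):
#
#     python consolidate_rag_checkpoint.py \
#         --model_type rag_sequence \
#         --generator_name_or_path facebook/bart-large-cnn \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#         --dest ./rag-sequence-checkpoint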
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)


if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    >>> factorial(7)
    5040
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
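# The lru_cache decorator memoizes every computed value, so each factorial is
# computed once and later calls are cache hits. Demo only (not part of the
# original module):
if __name__ == "__main__":
    factorial(10)
    print(factorial.cache_info())  # CacheInfo with hits/misses for the recursion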
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
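# Byte-level round-trip sketch (demo only; this module uses relative imports,
# so run the check in a REPL after importing bytes_to_unicode):
#
#     byte_encoder = bytes_to_unicode()
#     byte_decoder = {v: k for k, v in byte_encoder.items()}
#     text = "héllo world"
#     mapped = "".join(byte_encoder[b] for b in text.encode("utf-8"))
#     assert bytearray(byte_decoder[c] for c in mapped).decode("utf-8") == text
#
# Every possible byte maps to a printable unicode character, which is what
# lets the BPE merges operate on plain strings without control characters.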
import argparse
import re

from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints

from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging


logging.set_verbosity_info()


# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
    "/attention/": "/0/SelfAttention/",
    "/self_attention/": "/0/SelfAttention/",
    "/encoder_decoder_attention/": "/1/EncDecAttention/",
    "value": "v",
    "query": "q",
    "key": "k",
    "out": "o",
    "pre_self_attention_layer_norm": "0/layer_norm",
    "pre_cross_attention_layer_norm": "1/layer_norm",
    "pre_attention_layer_norm": "0/layer_norm",  # previously 1, but seems wrong
    "token_embedder": "shared",
    "encoder_norm": "final_layer_norm",
    "decoder_norm": "final_layer_norm",
    "relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
    "router/router_weights/w/": "router/classifier/",
    "roer/roer_weights/w/": "router/classifier/",
    "logits_dense": "lm_head",
}


def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x}
    # in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"

        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)

            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weihts = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weihts[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict


GIN_TO_CONFIG_MAPPING = {
    "NUM_ENCODER_LAYERS": "num_layers",
    "NUM_DECODER_LAYERS": "num_decoder_layers",
    "NUM_HEADS": "num_heads",
    "HEAD_DIM": "d_kv",
    "EMBED_DIM": "d_model",
    "MLP_DIM": "d_ff",
    "NUM_SELECTED_EXPERTS": "num_selected_experts",
    "NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
    "NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
    "dense.MlpBlock.activations": "feed_forward_proj",
}


def convert_gin_to_config(gin_file, num_experts):
    # Convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config


def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
            " model architecture. If not provided, a `gin_file` has to be provided."
        ),
    )
    parser.add_argument(
        "--gin_file",
        default=None,
        type=str,
        required=False,
        help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
    )
    parser.add_argument(
        "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
    )
    parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
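# Regex sanity sketch (demo only): the first rewrite in rename_keys turns T5X
# layer names into HF block/layer names, e.g. in a REPL:
#
#     >>> import re
#     >>> re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp/wi/kernel")
#     'encoder/block/3/layer/mlp/wi/kernel'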
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowercase (A__ ):
"""simple docstring"""
_snake_case = (DPMSolverSinglestepScheduler,)
_snake_case = (("""num_inference_steps""", 25),)
def UpperCAmelCase ( self , **A ) -> List[Any]:
snake_case : str = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("""inf""" ),
"variance_type": None,
}
config.update(**_A )
return config
def UpperCAmelCase ( self , A=0 , **A ) -> Tuple:
snake_case : List[Any] = dict(self.forward_default_kwargs )
snake_case : Tuple = kwargs.pop("""num_inference_steps""" , _A )
snake_case : Dict = self.dummy_sample
snake_case : Optional[Any] = 0.1 * sample
snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Tuple = self.get_scheduler_config(**_A )
snake_case : Union[str, Any] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
snake_case : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
snake_case : Optional[int] = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
snake_case : List[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : str = sample, sample
for t in range(_A , time_step + scheduler.config.solver_order + 1 ):
snake_case : str = scheduler.step(_A , _A , _A , **_A ).prev_sample
snake_case : Union[str, Any] = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self , A=0 , **A ) -> Any:
snake_case : List[str] = dict(self.forward_default_kwargs )
snake_case : Tuple = kwargs.pop("""num_inference_steps""" , _A )
snake_case : int = self.dummy_sample
snake_case : int = 0.1 * sample
snake_case : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
snake_case : Tuple = self.get_scheduler_config()
snake_case : Optional[int] = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
snake_case : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
snake_case : Dict = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
snake_case : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case : List[Any] = scheduler.step(_A , _A , _A , **_A ).prev_sample
snake_case : Dict = new_scheduler.step(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase ( self , A=None , **A ) -> str:
if scheduler is None:
snake_case : Dict = self.scheduler_classes[0]
snake_case : Optional[Any] = self.get_scheduler_config(**_A )
snake_case : int = scheduler_class(**_A )
snake_case : Tuple = self.scheduler_classes[0]
snake_case : int = self.get_scheduler_config(**_A )
snake_case : str = scheduler_class(**_A )
snake_case : List[Any] = 1_0
snake_case : Any = self.dummy_model()
snake_case : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
snake_case : int = model(_A , _A )
snake_case : List[Any] = scheduler.step(_A , _A , _A ).prev_sample
return sample
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case : Union[str, Any] = 5_0
snake_case : Tuple = self.dummy_model()
snake_case : Tuple = self.dummy_sample_deter
scheduler.set_timesteps(_A )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
snake_case : Any = model(_A , _A )
snake_case : Tuple = scheduler.step(_A , _A , _A ).prev_sample
snake_case : str = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.25_74 ) < 1e-3
def UpperCAmelCase ( self ) -> Any:
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_A )
def UpperCAmelCase ( self ) -> Any:
snake_case : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
snake_case : Tuple = self.full_loop(scheduler=_A )
snake_case : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
snake_case : Tuple = DEISMultistepScheduler.from_config(scheduler.config )
snake_case : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case : List[str] = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case : int = self.full_loop(scheduler=_A )
snake_case : int = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def UpperCAmelCase ( self ) -> List[str]:
self.check_over_configs(thresholding=_A )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , algorithm_type="""dpmsolver++""" , solver_order=_A , solver_type=_A , )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
snake_case : str = self.full_loop(
solver_order=_A , solver_type=_A , prediction_type=_A , algorithm_type=_A , )
assert not torch.isnan(_A ).any(), "Samples have nan numbers"
def UpperCAmelCase ( self ) -> Tuple:
self.check_over_configs(lower_order_final=_A )
self.check_over_configs(lower_order_final=_A )
def UpperCAmelCase ( self ) -> int:
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def UpperCAmelCase ( self ) -> List[Any]:
self.check_over_configs(variance_type=_A )
self.check_over_configs(variance_type="""learned_range""" )
def UpperCAmelCase ( self ) -> int:
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=_A , time_step=0 )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Dict = self.full_loop()
snake_case : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.27_91 ) < 1e-3
def UpperCAmelCase ( self ) -> int:
snake_case : Dict = self.full_loop(use_karras_sigmas=_A )
snake_case : Any = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.22_48 ) < 1e-3
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
snake_case : List[Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.14_53 ) < 1e-3
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : List[str] = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=_A )
snake_case : Union[str, Any] = torch.mean(torch.abs(_A ) )
assert abs(result_mean.item() - 0.06_49 ) < 1e-3
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = self.scheduler_classes[0]
snake_case : Optional[Any] = self.get_scheduler_config(thresholding=_A , dynamic_thresholding_ratio=0 )
snake_case : Any = scheduler_class(**_A )
snake_case : Any = 1_0
snake_case : Union[str, Any] = self.dummy_model()
snake_case : Dict = self.dummy_sample_deter.half()
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.timesteps ):
snake_case : Any = model(_A , _A )
snake_case : Dict = scheduler.step(_A , _A , _A ).prev_sample
        assert sample.dtype == torch.float16
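

# A minimal round-trip sketch (hypothetical demo, not part of the test suite):
# the save_config / from_pretrained pair exercised by the tests above persists
# and restores a scheduler purely through its on-disk config file.
if __name__ == "__main__":
    _scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
    with tempfile.TemporaryDirectory() as _tmpdir:
        _scheduler.save_config(_tmpdir)
        _reloaded = DPMSolverSinglestepScheduler.from_pretrained(_tmpdir)
    assert _reloaded.config.solver_order == _scheduler.config.solver_order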
| 124 | def binary_recursive(decimal: int) -> str:
    """Recursively build the binary representation of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return its binary representation, 0b-prefixed."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
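

def _demo() -> None:
    # A few spot checks for the helpers above (hypothetical demo, not part of
    # the original script):
    assert binary_recursive(10) == "1010"
    assert main("8") == "0b1000"
    assert main("-1") == "-0b1"
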
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 0 |
'''simple docstring'''
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """k is the Harris free parameter (empirically 0.04 or 0.06); window_size is the neighbourhood size."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the value validated in __init__
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Threshold can be tuned; higher values keep only strong corners.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
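

def _harris_response(wxx: float, wyy: float, wxy: float, k: float = 0.04) -> float:
    # A hypothetical helper (not in the original file) isolating the corner
    # response computed in detect(): R = det(M) - k * trace(M)**2. A window
    # with strong gradients in both directions scores far higher than a flat
    # one, e.g. _harris_response(100.0, 100.0, 0.0) -> 8400.0 while
    # _harris_response(1.0, 1.0, 0.0) -> 0.84.
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2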
| 198 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
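

# Sketch of what the lazy indirection buys (hypothetical usage): the heavy
# torch / vision submodules are only imported when an attribute is first
# touched, e.g.
#   from transformers.models.chinese_clip import ChineseCLIPConfig
# resolves through _LazyModule and only then executes configuration_chinese_clip.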
| 18 | 0 |
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data into the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit sample standard deviation."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
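

def _demo() -> None:
    # Spot checks (hypothetical demo): min-max maps the extremes to 0 and 1;
    # standardization uses the *sample* stdev from the statistics module.
    assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
    assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]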
| 224 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    """Rename T5X checkpoint keys so they line up with the HF SwitchTransformers layout."""
    # 1. in HF T5, we have block.{x}.layer.{y} which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = R".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", new_key)

        layer_to_block_of_layer = R"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R"/mlp/", R"/2/mlp/", new_key)
                new_key = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_expert_key] = expert_weights[idx]
                print(f'{key} -> {new_expert_key}')
            s_dict.pop(key)

    return s_dict
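

# Illustration of the renaming above on a made-up key (values assumed):
#   re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_0/mlp/wi/kernel")
#   -> "encoder/block/0/layer/mlp/wi/kernel"
# and the encoder branch then rewrites "/mlp/" to "/1/mlp/", giving
# "encoder/block/0/layer/1/mlp/wi/kernel".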
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    """Translate a T5X gin config into a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(R"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
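

# Sketch of the gin parsing on one assumed line of input:
#   re.findall(r"(.*) = ([0-9.]*)", "NUM_HEADS = 12") -> [("NUM_HEADS", "12")]
# and GIN_TO_CONFIG_MAPPING turns that pair into the config kwarg num_heads=12.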
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X/flax SwitchTransformers checkpoint and save it as a PyTorch model."""
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
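
    # Example invocation of this script (all paths are placeholders):
    #   python <this_script>.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/config.gin \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --num_experts 8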
| 18 | 0 |
'''simple docstring'''
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
lowercase_ = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a_ ( A__ ):
'''simple docstring'''
def __init__( self , *A , A=None , A=None , A=None , **A ) -> Any:
super().__init__(*_A , **_A )
_SCREAMING_SNAKE_CASE = eval_examples
_SCREAMING_SNAKE_CASE = post_process_function
_SCREAMING_SNAKE_CASE = quant_trainer_args
_SCREAMING_SNAKE_CASE = 128 # default number of calibration samples
def snake_case_( self , A=None ) -> Dict:
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
_SCREAMING_SNAKE_CASE = calib_dataset if calib_dataset is not None else self.calib_dataset
_SCREAMING_SNAKE_CASE = self._remove_unused_columns(_A , description="""Calibration""" )
return DataLoader(
_A , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_A , )
def snake_case_( self , A=None ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.train_dataset if calib_dataset is None else calib_dataset
_SCREAMING_SNAKE_CASE = self.get_calib_dataloader(_A )
_SCREAMING_SNAKE_CASE = self.model
quant_trainer.configure_model(_A , self.quant_trainer_args , calib=_A )
model.eval()
quant_trainer.enable_calibration(_A )
logger.info("""***** Running calibration *****""" )
logger.info(f' Num examples = {self.calib_num}' )
logger.info(f' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(_A ):
# Prediction step
_SCREAMING_SNAKE_CASE = self.prediction_step(_A , _A , prediction_loss_only=_A )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_A , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = model
def snake_case_( self , A=None , A=None , A=None , A = "eval" ) -> Any:
_SCREAMING_SNAKE_CASE = self.eval_dataset if eval_dataset is None else eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(_A )
_SCREAMING_SNAKE_CASE = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
_A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
_SCREAMING_SNAKE_CASE = self.post_process_function(_A , _A , output.predictions )
_SCREAMING_SNAKE_CASE = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_SCREAMING_SNAKE_CASE = metrics.pop(_A )
self.log(_A )
else:
_SCREAMING_SNAKE_CASE = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_SCREAMING_SNAKE_CASE = self.callback_handler.on_evaluate(self.args , self.state , self.control , _A )
return metrics
def snake_case_( self , A , A , A=None , A = "test" ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.get_test_dataloader(_A )
# Temporarily disable metric computation, we will do it in the loop here.
_SCREAMING_SNAKE_CASE = self.compute_metrics
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_SCREAMING_SNAKE_CASE = eval_loop(
_A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_A , )
finally:
_SCREAMING_SNAKE_CASE = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
_SCREAMING_SNAKE_CASE = self.post_process_function(_A , _A , output.predictions , """predict""" )
_SCREAMING_SNAKE_CASE = self.compute_metrics(_A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_SCREAMING_SNAKE_CASE = metrics.pop(_A )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_A )
def snake_case_( self , A="./" ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.eval_dataset
_SCREAMING_SNAKE_CASE = self.get_eval_dataloader(_A )
_SCREAMING_SNAKE_CASE = next(iter(_A ) )
# saving device - to make it consistent
_SCREAMING_SNAKE_CASE = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
_SCREAMING_SNAKE_CASE = tuple(v.to(_A ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.model.to(_A )
model.eval()
model.float()
_SCREAMING_SNAKE_CASE = model.module if hasattr(_A , """module""" ) else model
quant_trainer.configure_model(_A , self.quant_trainer_args )
_SCREAMING_SNAKE_CASE = os.path.join(_A , """model.onnx""" )
logger.info(f'exporting model to {output_model_file}' )
_SCREAMING_SNAKE_CASE = {0: "batch_size", 1: "seq_len"}
torch.onnx.export(
_A , _A , _A , export_params=_A , opset_version=13 , do_constant_folding=_A , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_A , )
logger.info("""onnx export finished""" )
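        # The dynamic_axes mapping above lets the exported graph accept any
        # batch size / sequence length. A sketch of loading it back (assuming
        # onnxruntime is installed):
        #   import onnxruntime as ort
        #   sess = ort.InferenceSession(output_model_file)
        #   start, end = sess.run(None, {"input_ids": ..., "attention_mask": ..., "token_type_ids": ...})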
| 58 | from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate the sine of an angle given in degrees with a truncated Maclaurin series."""
    # Simplify the angle to be between -360 and 360 degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
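

def _demo() -> None:
    # Spot checks (hypothetical demo): with the default 18 series terms the
    # approximation matches the exact values at the rounding precision used.
    assert sin(0.0) == 0.0
    assert sin(30.0) == 0.5
    assert sin(90.0) == 1.0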
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 18 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( A__ , unittest.TestCase ):
__lowerCamelCase : List[Any] = CodeGenTokenizer
__lowerCamelCase : str = CodeGenTokenizerFast
__lowerCamelCase : str = True
__lowerCamelCase : Optional[int] = {"""add_prefix_space""": True}
__lowerCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : Optional[int] =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
UpperCAmelCase : Optional[int] =dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase : Union[str, Any] =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCAmelCase : List[str] ={"unk_token": "<unk>"}
UpperCAmelCase : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def UpperCAmelCase__ ( self , **snake_case__ ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self , **snake_case__ ) -> Union[str, Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self , snake_case__ ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any ="lower newer"
UpperCAmelCase : Dict ="lower newer"
return input_text, output_text
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[str] =CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : int ="lower newer"
UpperCAmelCase : List[Any] =["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
UpperCAmelCase : Tuple =tokenizer.tokenize(_A , add_prefix_space=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase : int =tokens + [tokenizer.unk_token]
UpperCAmelCase : List[str] =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Tuple =self.get_tokenizer()
UpperCAmelCase : Any =self.get_rust_tokenizer(add_prefix_space=_A )
UpperCAmelCase : List[str] ="lower newer"
# Testing tokenization
UpperCAmelCase : Optional[Any] =tokenizer.tokenize(_A , add_prefix_space=_A )
UpperCAmelCase : Any =rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
# Testing conversion to ids without special tokens
UpperCAmelCase : int =tokenizer.encode(_A , add_special_tokens=_A , add_prefix_space=_A )
UpperCAmelCase : Optional[Any] =rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
# Testing conversion to ids with special tokens
UpperCAmelCase : int =self.get_rust_tokenizer(add_prefix_space=_A )
UpperCAmelCase : int =tokenizer.encode(_A , add_prefix_space=_A )
UpperCAmelCase : Optional[int] =rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# Testing the unknown token
UpperCAmelCase : int =tokens + [rust_tokenizer.unk_token]
UpperCAmelCase : Any =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ) , _A )
def UpperCAmelCase__ ( self , *snake_case__ , **snake_case__ ) -> str:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self , snake_case__=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Tuple =self.rust_tokenizer_class.from_pretrained(_A , **_A )
# Simple input
UpperCAmelCase : Optional[int] ="This is a simple input"
UpperCAmelCase : Dict =["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase : Optional[Any] =("This is a simple input", "This is a pair")
UpperCAmelCase : Union[str, Any] =[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : int =CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
UpperCAmelCase : Any ="This is a simple input"
UpperCAmelCase : str =["This is a simple input looooooooong", "This is a simple input"]
UpperCAmelCase : int =("This is a simple input", "This is a pair")
UpperCAmelCase : List[str] =[
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
UpperCAmelCase : Dict =tokenizer.pad_token_id
UpperCAmelCase : str =tokenizer(_A , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
UpperCAmelCase : Tuple =tokenizer(_A , padding=_A , truncate=_A , return_tensors='''np''' )
UpperCAmelCase : Union[str, Any] =tokenizer(*_A , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
UpperCAmelCase : Union[str, Any] =tokenizer(_A , padding=_A , truncate=_A , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple ="$$$"
UpperCAmelCase : str =CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=_A , add_bos_token=_A )
UpperCAmelCase : str ="This is a simple input"
UpperCAmelCase : Optional[Any] =["This is a simple input 1", "This is a simple input 2"]
UpperCAmelCase : Tuple =tokenizer.bos_token_id
UpperCAmelCase : List[Any] =tokenizer(_A )
UpperCAmelCase : Optional[int] =tokenizer(_A )
self.assertEqual(out_s.input_ids[0] , _A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCAmelCase : Optional[int] =tokenizer.decode(out_s.input_ids )
UpperCAmelCase : int =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
UpperCAmelCase : List[str] ="\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
UpperCAmelCase : int ="\nif len_a > len_b: result = a\nelse: result = b"
UpperCAmelCase : Optional[int] =tokenizer.encode(_A )
UpperCAmelCase : Union[str, Any] =["^#", re.escape('''<|endoftext|>''' ), "^'''", "^\"\"\"", "\n\n\n"]
UpperCAmelCase : Optional[Any] =tokenizer.decode(_A , truncate_before_pattern=_A )
self.assertEqual(_A , _A )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
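    # Minimal sketch of the truncate_before_pattern behaviour exercised above
    # (checkpoint name as used in the slow test):
    #   tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    #   tok.decode(ids, truncate_before_pattern=["^#", "\n\n\n"])
    # cuts the decoded completion at the first line matching one of the patterns.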
| 348 | from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num! recursively; lru_cache memoizes previously computed values."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
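

def _demo() -> None:
    # Spot checks (hypothetical demo): the second call reuses the cached
    # intermediate results computed during the first.
    assert factorial(5) == 120
    assert factorial(6) == 720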
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A ='''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def a ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : int=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Union[str, Any]=None , ):
'''simple docstring'''
if attention_mask is None:
__UpperCAmelCase : int = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__UpperCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__UpperCAmelCase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[int] , a_ : str , a_ : int=13 , a_ : Union[str, Any]=7 , a_ : List[Any]=True , a_ : Optional[int]=False , a_ : Tuple=99 , a_ : Optional[int]=16 , a_ : List[str]=2 , a_ : int=4 , a_ : Any=4 , a_ : Any="gelu" , a_ : Union[str, Any]=0.1 , a_ : List[Any]=0.1 , a_ : Union[str, Any]=32 , a_ : List[str]=2 , a_ : Tuple=1 , a_ : Optional[int]=0 , a_ : List[Any]=0.0_2 , ):
'''simple docstring'''
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : List[Any] = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Optional[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Tuple = eos_token_id
__UpperCAmelCase : int = pad_token_id
__UpperCAmelCase : Any = bos_token_id
__UpperCAmelCase : Union[str, Any] = initializer_range
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCAmelCase : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCAmelCase : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
__UpperCAmelCase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
__UpperCAmelCase : List[Any] = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self : List[Any] , a_ : Tuple , a_ : Tuple , a_ : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 20
__UpperCAmelCase : Any = model_class_name(_A )
__UpperCAmelCase : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase : Dict = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
__UpperCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
__UpperCAmelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
__UpperCAmelCase : Optional[int] = model.decode(_A , _A )
__UpperCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def snake_case__ ( self : Union[str, Any] , a_ : int , a_ : Tuple , a_ : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 20
__UpperCAmelCase : int = model_class_name(_A )
__UpperCAmelCase : List[str] = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__UpperCAmelCase : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
__UpperCAmelCase : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase : Dict = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
__UpperCAmelCase : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase : int = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
__UpperCAmelCase : Tuple = model.decode(_A , _A , decoder_attention_mask=_A )
__UpperCAmelCase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = 99
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__UpperCAmelCase : Any = input_ids.shape[0]
__UpperCAmelCase : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self._get_config_and_data()
__UpperCAmelCase : Dict = FlaxBlenderbotSmallForConditionalGeneration(_A )
__UpperCAmelCase : Dict = lm_model(input_ids=_A )
__UpperCAmelCase : List[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__UpperCAmelCase : Dict = FlaxBlenderbotSmallForConditionalGeneration(_A )
__UpperCAmelCase : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__UpperCAmelCase : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__UpperCAmelCase : Any = lm_model(input_ids=_A , decoder_input_ids=_A )
__UpperCAmelCase : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , _A )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__UpperCAmelCase : Optional[int] = shift_tokens_right(_A , 1 , 2 )
__UpperCAmelCase : int = np.equal(_A , 1 ).astype(np.floataa ).sum()
__UpperCAmelCase : Optional[int] = np.equal(_A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class UpperCAmelCase__ ( A__ ,unittest.TestCase ,A__ ):
'''simple docstring'''
UpperCamelCase = True
UpperCamelCase = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = FlaxBlenderbotSmallModelTester(self )
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : Optional[int] = self._prepare_for_class(_A , _A )
__UpperCAmelCase : List[str] = model_class(_A )
@jax.jit
def encode_jitted(a_ : Dict , a_ : Optional[int]=None , **a_ : int ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase : Tuple = encode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase : Dict = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : str = model_class(_A )
__UpperCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__UpperCAmelCase : Dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(a_ : str , a_ : Any , a_ : int ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase : Tuple = decode_jitted(**_A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase : Optional[Any] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Dict = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__UpperCAmelCase : str = np.ones((1, 1) ) * model.config.eos_token_id
__UpperCAmelCase : Any = model(_A )
self.assertIsNotNone(_A )
| 226 | from collections import defaultdict
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase )
if ret % 2 == 0:
cuts.append(lowerCAmelCase )
return ret
def _snake_case ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowercase_ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def a__ ( snake_case ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : Tuple = False
elif args.student_type == "gpt2":
__SCREAMING_SNAKE_CASE : int = False
def a__ ( snake_case , snake_case ):
"""simple docstring"""
if args.student_type == "roberta":
__SCREAMING_SNAKE_CASE : str = False
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=snake_case , required=snake_case , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=snake_case , required=snake_case , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=snake_case , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=snake_case , required=snake_case , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=snake_case , type=snake_case , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=snake_case , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=snake_case , required=snake_case , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=snake_case , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=snake_case , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=snake_case , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=snake_case , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=snake_case , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=snake_case , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=snake_case , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=snake_case , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=snake_case , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=snake_case , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=snake_case , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=snake_case , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=snake_case , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=snake_case , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=snake_case , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=snake_case , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=snake_case , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=snake_case , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=snake_case , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=snake_case , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=snake_case , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=snake_case , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=snake_case , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=snake_case , default=500 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=snake_case , default=4_000 , help='''Checkpoint interval.''' )
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
sanity_checks(snake_case )
# ARGS #
init_gpu_params(snake_case )
set_seed(snake_case )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(snake_case ) , snake_case , indent=4 )
git_log(args.dump_path )
__SCREAMING_SNAKE_CASE : List[Any] = MODEL_CLASSES[args.student_type]
__SCREAMING_SNAKE_CASE : str = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__SCREAMING_SNAKE_CASE : Dict = teacher_tokenizer_class.from_pretrained(args.teacher_name )
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.all_special_tokens.index(snake_case )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''' )
__SCREAMING_SNAKE_CASE : Tuple = special_tok_ids
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
with open(args.data_file , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : Optional[int] = pickle.load(snake_case )
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
with open(args.token_counts , '''rb''' ) as fp:
__SCREAMING_SNAKE_CASE : str = pickle.load(snake_case )
__SCREAMING_SNAKE_CASE : int = np.maximum(snake_case , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__SCREAMING_SNAKE_CASE : str = 0.0 # do not predict special tokens
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(snake_case )
else:
__SCREAMING_SNAKE_CASE : Tuple = None
__SCREAMING_SNAKE_CASE : List[str] = LmSeqsDataset(params=snake_case , data=snake_case )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = student_config_class.from_pretrained(args.student_config )
__SCREAMING_SNAKE_CASE : List[Any] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=snake_case )
else:
__SCREAMING_SNAKE_CASE : List[str] = student_model_class(snake_case )
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('''Student loaded.''' )
# TEACHER #
__SCREAMING_SNAKE_CASE : List[Any] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=snake_case )
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(snake_case , snake_case )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(snake_case , snake_case )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__SCREAMING_SNAKE_CASE : int = Distiller(
params=snake_case , dataset=snake_case , token_probs=snake_case , student=snake_case , teacher=snake_case )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 303 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( A__ , unittest.TestCase):
snake_case__ : str = LEDTokenizer
snake_case__ : List[str] = LEDTokenizerFast
snake_case__ : str = True
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().setUp()
_lowerCamelCase : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
_lowerCamelCase : Optional[Any] = dict(zip(_A , range(len(_A ) ) ) )
_lowerCamelCase : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCamelCase : Tuple = {"unk_token": "<unk>"}
_lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def SCREAMING_SNAKE_CASE ( self : Tuple , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE ( self : str , **__lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Any ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_lowerCamelCase : List[Any] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Optional[Any] = tokenizer(_A , max_length=len(_A ) , padding=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_lowerCamelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
@require_torch
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : int = tokenizer(_A , padding=_A , return_tensors='''pt''' )
self.assertIn('''input_ids''' , _A )
self.assertIn('''attention_mask''' , _A )
self.assertNotIn('''labels''' , _A )
self.assertNotIn('''decoder_attention_mask''' , _A )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Union[str, Any] = tokenizer(text_target=_A , max_length=3_2 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(3_2 , targets['''input_ids'''].shape[1] )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : List[str] = tokenizer(
['''I am a small frog''' * 1_0_2_4, '''I am a small frog'''] , padding=_A , truncation=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = ["A long paragraph for summarization."]
_lowerCamelCase : str = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : Optional[Any] = tokenizer(_A , return_tensors='''pt''' )
_lowerCamelCase : Optional[int] = tokenizer(text_target=_A , return_tensors='''pt''' )
_lowerCamelCase : Optional[int] = inputs["input_ids"]
_lowerCamelCase : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_lowerCamelCase : str = ["Summary of the text.", "Another summary."]
_lowerCamelCase : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_lowerCamelCase : Optional[int] = tokenizer(_A , padding=_A )
_lowerCamelCase : List[Any] = [[0] * len(_A ) for x in encoded_output["input_ids"]]
_lowerCamelCase : Optional[Any] = tokenizer.pad(_A )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , _A )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
_lowerCamelCase : Dict = self.tokenizer_class.from_pretrained(_A , **_A )
_lowerCamelCase : Any = "A, <mask> AllenNLP sentence."
_lowerCamelCase : Tuple = tokenizer_r.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
_lowerCamelCase : Any = tokenizer_p.encode_plus(_A , add_special_tokens=_A , return_token_type_ids=_A )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_lowerCamelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_lowerCamelCase : str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 72 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_:Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_:Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 116 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 0 |
'''simple docstring'''
def _A ( A__ = 3 , A__ = 7 , A__ = 1000000 ):
"""simple docstring"""
__lowercase = 0
__lowercase = 1
for current_denominator in range(1 , limit + 1 ):
__lowercase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__lowercase = current_numerator
__lowercase = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 104 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 0 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCamelCase = HfApi()
UpperCamelCase = {}
# fmt: off
UpperCamelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
UpperCamelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
UpperCamelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
UpperCamelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
UpperCamelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
UpperCamelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
UpperCamelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
UpperCamelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
UpperCamelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
UpperCamelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
UpperCamelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
UpperCamelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
UpperCamelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
UpperCamelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
UpperCamelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
UpperCamelCase = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase = '''/home/patrick/google_checkpoints/''' + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
UpperCamelCase = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 186 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
        inputs = self.get_dummy_inputs(_A )
        inputs["num_inference_steps"] = 2
        outputs = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers without a sigma schedule; they are not supported by this pipeline
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    def tearDown ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Optional[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __lowercase (TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : Optional[int] = AlbertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , A ) -> Optional[int]:
snake_case : Optional[Any] = "this is a test"
snake_case : Optional[Any] = "this is a test"
return input_text, output_text
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = "<pad>"
snake_case : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(_A ) , 3_0_0_0_0 )
def UpperCAmelCase ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def UpperCAmelCase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = self.get_rust_tokenizer()
snake_case : List[str] = "I was born in 92000, and this is falsé."
snake_case : str = tokenizer.tokenize(_A )
snake_case : List[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
snake_case : Any = tokenizer.encode(_A , add_special_tokens=_A )
snake_case : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : Tuple = tokenizer.encode(_A )
snake_case : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Any = AlbertTokenizer(_A , keep_accents=_A )
snake_case : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_A , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [4_8, 2_5, 2_1, 1_2_8_9] )
snake_case : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
snake_case : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
snake_case : List[str] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = AlbertTokenizer(_A )
snake_case : List[str] = tokenizer.encode("""sequence builders""" )
snake_case : Dict = tokenizer.encode("""multi-sequence build""" )
snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[str] = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 124 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( PretrainedConfig ):
    model_type = 'perceiver'
    def __init__( self,num_latents=256,d_latents=1280,d_model=768,num_blocks=1,num_self_attends_per_block=26,num_self_attention_heads=8,num_cross_attention_heads=8,qk_channels=None,v_channels=None,cross_attention_shape_for_attention="kv",self_attention_widening_factor=1,cross_attention_widening_factor=1,hidden_act="gelu",attention_probs_dropout_prob=0.1,initializer_range=0.02,layer_norm_eps=1E-12,use_query_residual=True,vocab_size=262,max_position_embeddings=2048,image_size=56,train_size=[368, 496],num_frames=16,audio_samples_per_frame=1920,samples_per_patch=16,output_shape=[1, 16, 224, 224],**kwargs,):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class a__ ( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1E-4
    def generate_dummy_inputs( self,preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],batch_size: int = -1,seq_length: int = -1,num_choices: int = -1,is_pair: bool = False,framework: Optional[TensorType] = None,num_channels: int = 3,image_width: int = 40,image_height: int = 40,):
        """simple docstring"""
        if isinstance(preprocessor,PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input,return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("input_ids" )
            return inputs
        elif isinstance(preprocessor,FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size,fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size,num_channels,image_height,image_width )
            inputs = dict(preprocessor(images=dummy_input,return_tensors=framework ) )
            inputs["inputs"] = inputs.pop("pixel_values" )
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 0 |
'''simple docstring'''
def perfect ( number: int ) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
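# Illustrative checks: 6 is perfect because 1 + 2 + 3 == 6, while 7 is not:
#   perfect(6)   # -> True
#   perfect(7)   # -> False
#   perfect(28)  # -> True, since 1 + 2 + 4 + 7 + 14 == 28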
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
__a: Any = int(input("""Enter number: """).strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 198 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node( self,node ):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key,list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping( self,node,deep=False ):
        """simple docstring"""
        mapping = super().construct_mapping(node,deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme ( readme_content : str ):
    """simple docstring"""
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'} # train-eval-index in the YAML metadata
    @classmethod
    def from_readme( cls,path : Path ):
        """simple docstring"""
        with open(path,encoding="utf-8" ) as readme_file:
            yaml_string , content = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self,path : Path ):
        """simple docstring"""
        if path.exists():
            with open(path,encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path,"w",encoding="utf-8" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme( self,readme_content : Optional[str] = None ):
        """simple docstring"""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string( cls,string : str ):
        """simple docstring"""
        metadata_dict = yaml.load(string,Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ):
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },sort_keys=False,allow_unicode=True,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    pipeline_class = KandinskyVaaPriorPipeline
    params = ["""prompt"""]
    batch_params = ["""prompt""", """negative_prompt"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """generator""",
        """num_inference_steps""",
        """latents""",
        """negative_prompt""",
        """guidance_scale""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
return 1_0_0
@property
    def dummy_tokenizer( self ):
lowerCAmelCase_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
lowerCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(_A )
@property
    def dummy_prior( self ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowerCAmelCase_ : List[Any] = PriorTransformer(**_A )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
    def dummy_image_encoder( self ):
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowerCAmelCase_ : Optional[Any] = CLIPVisionModelWithProjection(_A )
return model
@property
    def dummy_image_processor( self ):
lowerCAmelCase_ : List[Any] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=2_2_4 , )
return image_processor
    def get_dummy_components( self ):
lowerCAmelCase_ : Dict = self.dummy_prior
lowerCAmelCase_ : List[str] = self.dummy_image_encoder
lowerCAmelCase_ : Any = self.dummy_text_encoder
lowerCAmelCase_ : Union[str, Any] = self.dummy_tokenizer
lowerCAmelCase_ : Optional[int] = self.dummy_image_processor
lowerCAmelCase_ : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=10.0 , )
lowerCAmelCase_ : str = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
lowerCAmelCase_ : Dict = "cpu"
lowerCAmelCase_ : List[Any] = self.get_dummy_components()
lowerCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
lowerCAmelCase_ : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowerCAmelCase_ : List[str] = pipe(**self.get_dummy_inputs(_A ) )
lowerCAmelCase_ : int = output.image_embeds
lowerCAmelCase_ : Any = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
lowerCAmelCase_ : Any = image[0, -1_0:]
lowerCAmelCase_ : List[Any] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowerCAmelCase_ : Optional[int] = np.array(
[-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Optional[Any] = torch_device == "cpu"
lowerCAmelCase_ : str = True
lowerCAmelCase_ : List[str] = False
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )
@skip_mps
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : List[str] = torch_device == "cpu"
lowerCAmelCase_ : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=_A , test_mean_pixel_difference=_A , )
| 224 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency ( inductance : float , capacitance : float ) -> tuple[str, float]:
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
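# Illustrative check: with inductance = 10 H and capacitance = 5 F,
#   resonant_frequency(10, 5)[1]  # -> 1 / (2 * pi * sqrt(50)) ≈ 0.0225 Hz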
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 0 |
'''simple docstring'''
import math
import unittest
def is_prime ( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
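# Why the loop steps by 6: any integer is 6k + r with r in {0, 1, 2, 3, 4, 5};
# r in {0, 2, 4} gives a multiple of 2 and r == 3 a multiple of 3, so every prime
# above 3 has the form 6k - 1 or 6k + 1. Quick check:
#   [n for n in range(5, 30) if is_prime(n)]  # -> [5, 7, 11, 13, 17, 19, 23, 29]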
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self ) -> Any:
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def snake_case_( self ) -> List[Any]:
with self.assertRaises(_A ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 58 |
def binary_insertion_sort ( collection : list ):
    """Sort `collection` in place with insertion sort, locating each insertion point by binary search."""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
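# Illustrative trace of binary_insertion_sort([5, 2, 4]):
#   i=1: binary search puts 2 before 5        -> [2, 5, 4]
#   i=2: binary search puts 4 between 2 and 5 -> [2, 4, 5]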
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __snake_case ( TestCasePlus ):
@slow
@require_torch
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int =EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
UpperCAmelCase : Dict =BertTokenizer.from_pretrained('''bert-base-uncased''' )
        bertabert.config.vocab_size =bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id =tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id =tokenizer.cls_token_id
        bertabert.config.max_length =128
UpperCAmelCase : Optional[int] =datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
UpperCAmelCase : Any =datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
UpperCAmelCase : Tuple =train_dataset.select(range(32 ) )
UpperCAmelCase : int =val_dataset.select(range(16 ) )
UpperCAmelCase : List[str] =4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=True , max_length=512 )
            outputs = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # replace padding token ids in the labels so they are ignored by the loss
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
assert all(len(_A ) == 512 for x in inputs.input_ids )
assert all(len(_A ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred ):
UpperCAmelCase : Union[str, Any] =pred.label_ids
UpperCAmelCase : str =pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase : List[str] =tokenizer.batch_decode(_A , skip_special_tokens=_A )
UpperCAmelCase : List[Any] =tokenizer.batch_decode(_A , skip_special_tokens=_A )
UpperCAmelCase : Any =sum([int(pred_str[i] == label_str[i] ) for i in range(len(_A ) )] ) / len(_A )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase : int =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_A , batch_size=_A , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
UpperCAmelCase : Optional[int] =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_A , batch_size=_A , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
UpperCAmelCase : Tuple =self.get_auto_remove_tmp_dir()
UpperCAmelCase : List[Any] =SeqaSeqTrainingArguments(
output_dir=_A , per_device_train_batch_size=_A , per_device_eval_batch_size=_A , predict_with_generate=_A , evaluation_strategy='''steps''' , do_train=_A , do_eval=_A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCAmelCase : Union[str, Any] =SeqaSeqTrainer(
model=_A , args=_A , compute_metrics=_compute_metrics , train_dataset=_A , eval_dataset=_A , tokenizer=_A , )
# start training
trainer.train()
| 348 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self,start,end,val,left=None,right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree :
    def __init__( self,collection : Sequence,function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0,len(collection ) - 1 )
    def update( self,i,val ):
        """simple docstring"""
        self._update_tree(self.root,i,val )
    def query_range( self,i,j ):
        """simple docstring"""
        return self._query_range(self.root,i,j )
    def _build_tree( self,start,end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start,end,self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start,mid )
        right = self._build_tree(mid + 1,end )
        return SegmentTreeNode(start,end,self.fn(left.val,right.val ),left,right )
    def _update_tree( self,node,i,val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left,i,val )
        else:
            self._update_tree(node.right,i,val )
        node.val = self.fn(node.left.val,node.right.val )
    def _query_range( self,node,i,j ):
        """simple docstring"""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left,i,j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left,i,node.mid ),self._query_range(node.right,node.mid + 1,j ),)
        else:
            # range in right child tree
            return self._query_range(node.right,i,j )
    def traverse( self ):
        """simple docstring"""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 0 |
def print_pascal_triangle ( num_rows : int ) -> None:
    '''Print Pascal's triangle for the given number of rows.'''
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=''' ''' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=''' ''' )
            else:
                print(triangle[row_idx][col_idx] , end='''''' )
        print()
def generate_pascal_triangle ( num_rows : int ) -> list[list[int]]:
    '''Create Pascal's triangle as a list of rows.'''
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(num_rows if False else triangle , current_row_idx ) if False else populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
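# Illustrative output:
#   generate_pascal_triangle(3)  # -> [[1], [1, 1], [1, 2, 1]]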
def populate_current_row ( triangle : list[list[int]] , current_row_idx : int ) -> list[int]:
    '''Build one row of the triangle from the row above it.'''
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element ( triangle : list[list[int]] , current_row : list[int] , current_row_idx : int , current_col_idx : int , ) -> None:
    '''Fill current_row[current_col_idx] using the Pascal recurrence on the row above.'''
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized ( num_rows : int ) -> list[list[int]]:
    '''Create Pascal's triangle, computing only the first half of each row and mirroring it.'''
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    result: list[list[int]] = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row )
    return result
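# The optimized variant exploits row symmetry: for row index 4 it computes only the
# first half [1, 4, 6] and mirrors [1, 4] to obtain the full row [1, 4, 6, 4, 1].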
def benchmark ( ) -> None:
    '''Time both triangle generators for a range of input sizes.'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup='''import __main__''' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds' )
    for value in range(15 ):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 226 |
def euclidean_gcd ( a : int , b : int ) -> int:
    """Compute the greatest common divisor with the iterative Euclidean algorithm."""
    while b:
        a , b = b , a % b
    return a
def euclidean_gcd_recursive ( a : int , b : int ) -> int:
    """Compute the greatest common divisor recursively."""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
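# Worked example of the Euclidean algorithm on (48, 18):
#   48, 18 -> 18, 12 -> 12, 6 -> 6, 0, so euclidean_gcd(48, 18) == 6.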
| 18 | 0 |
def selection_sort ( collection : list ):
    """Sort `collection` in place with selection sort and return it."""
    length = len(collection )
    for i in range(length - 1 ):
        least = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(selection_sort(unsorted))
| 303 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
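# Note: at import time this module is swapped for a _LazyModule, so the torch and
# TensorFlow submodules listed above are only loaded when one of their names is
# first accessed.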
| 18 | 0 |
"""simple docstring"""
import base64
def base85_encode ( string : str ) -> bytes:
    '''Encode a UTF-8 string as Ascii85 bytes.'''
    return base64.a85encode(string.encode('''utf-8''' ) )
def base85_decode ( a85encoded : bytes ) -> str:
    '''Decode Ascii85 bytes back to a UTF-8 string.'''
    return base64.a85decode(a85encoded ).decode('''utf-8''' )
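# Round-trip sanity check (Ascii85 encoding is lossless for UTF-8 text):
#   assert base85_decode(base85_encode("some text")) == "some text"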
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate ( model_type : str , generator_name_or_path : str , question_encoder_name_or_path : str , dest_dir : Path , config_name_or_path : str = None , generator_tokenizer_name_or_path : str = None , question_encoder_tokenizer_name_or_path : str = None , ):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
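# Example invocation (script filename and model identifiers are illustrative only):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-checkpoint \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base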
| 18 | 0 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs" )
@classmethod
    def setUpClass ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _lowerCAmelCase ( self ):
A : str = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy() )
def _lowerCAmelCase ( self ):
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
with self.subTest(config_file=_A ):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(_A ), self.test_file_path], env=os.environ.copy() )
def _lowerCAmelCase ( self ):
execute_subprocess_async(["""accelerate""", """test"""], env=os.environ.copy() )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = "test-tpu"
__lowerCamelCase : Optional[Any] = "us-central1-a"
__lowerCamelCase : Any = "ls"
__lowerCamelCase : List[Any] = ["accelerate", "tpu-config"]
__lowerCamelCase : Optional[Any] = "cd /usr/share"
__lowerCamelCase : Union[str, Any] = "tests/test_samples/test_command_file.sh"
__lowerCamelCase : Tuple = "Running gcloud compute tpus tpu-vm ssh"
def _lowerCAmelCase ( self ):
A : Dict = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : Optional[Any] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""], return_stdout=_A )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : int = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : int = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : Tuple = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : Any = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : List[str] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''', _A, )
def _lowerCAmelCase ( self ):
A : Optional[int] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
], return_stdout=_A, )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''', _A, )
| 116 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
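# The tests above exercise framework-agnostic tensor helpers; with plain numpy
# inputs they simply defer to numpy. A small illustrative sketch (assuming the
# generic helpers dispatch on the input type, as the tests imply):
def _demo_generic_tensor_ops():
    x = np.random.randn(2, 3)
    assert transpose(x).shape == (3, 2)
    assert reshape(x, (6,)).shape == (6,)
    assert squeeze(np.random.randn(1, 2, 3)).shape == (2, 3)
    assert expand_dims(x, axis=0).shape == (1, 2, 3)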
| 18 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 104 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/longformer-base-4096''': 4096,
    '''allenai/longformer-large-4096''': 4096,
    '''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
    '''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
    '''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte value to a printable unicode character so BPE can operate on bytes."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))

def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
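# Small sanity check for the two helpers above (illustrative, not part of the
# original module): the byte encoder must be a bijection over all 256 byte
# values, and get_pairs enumerates adjacent symbol pairs.
def _demo_bpe_helpers():
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256 and len(set(byte_encoder.values())) == 256
    assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}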
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
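# A self-contained sketch of the greedy BPE merge loop that the tokenizer's
# bpe() method implements, using a hypothetical two-entry merge table:
def _demo_bpe_merge():
    bpe_ranks = {("l", "o"): 0, ("lo", "w"): 1}
    word = ("l", "o", "w")
    while len(word) > 1:
        # Always merge the lowest-ranked (earliest learned) adjacent pair first.
        bigram = min(get_pairs(word), key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    assert word == ("low",)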
| 18 | 0 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to reach the bottom-right corner from (0, 0); print the path matrix on success."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved

def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first backtracking step: True if a path to the exit passes through (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
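# Illustrative run: 0 marks an open cell and 1 a blocked one; the solver prints
# the visited-cell matrix when a path from (0, 0) to the bottom-right exists.
if __name__ == "__main__":
    demo_maze = [
        [0, 1, 0, 1],
        [0, 0, 0, 1],
        [1, 0, 1, 0],
        [1, 0, 0, 0],
    ]
    assert solve_maze(demo_maze)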
| 186 | from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bartpho'''] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 124 | def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer, built one bit per call."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)

def main(number: str) -> str:
    """Validate the input string and return its binary representation, preserving the sign."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"

if __name__ == "__main__":
    from doctest import testmod
    testmod()
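# Quick self-check of the conversion above (illustrative): results agree with
# Python's built-in bin() for positive inputs and keep the sign for negatives.
if __name__ == "__main__":
    assert main("200") == bin(200)
    assert main("-12") == "-0b1100"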
| 18 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class UpperCAmelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = StableDiffusionLatentUpscalePipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE = frozenset([] )
SCREAMING_SNAKE_CASE = True
@property
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Any = 1
lowercase__ : Optional[int] = 4
lowercase__ : Optional[int] = (16, 16)
lowercase__ : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_A )
return image
def _lowerCAmelCase( self ) -> Optional[Any]:
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=_A , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=_A , only_cross_attention=_A , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
lowercase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
lowercase__ : int = EulerDiscreteScheduler(prediction_type='''sample''' )
lowercase__ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
lowercase__ : Tuple = CLIPTextModel(_A )
lowercase__ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Optional[Any]:
if str(_A ).startswith('''mps''' ):
lowercase__ : Optional[int] = torch.manual_seed(_A )
else:
lowercase__ : Dict = torch.Generator(device=_A ).manual_seed(_A )
lowercase__ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[Any] = "cpu"
lowercase__ : List[str] = self.get_dummy_components()
lowercase__ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase__ : Tuple = self.get_dummy_inputs(_A )
lowercase__ : Dict = pipe(**_A ).images
lowercase__ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
lowercase__ : Optional[Any] = np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
lowercase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
def _lowerCAmelCase( self ) -> List[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def _lowerCAmelCase( self ) -> int:
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def _lowerCAmelCase( self ) -> str:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def _lowerCAmelCase( self ) -> str:
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def _lowerCAmelCase( self ) -> Optional[int]:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def _lowerCAmelCase( self ) -> List[str]:
super().test_save_load_local(expected_max_difference=3E-3 )
def _lowerCAmelCase( self ) -> Union[str, Any]:
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def _lowerCAmelCase( self ) -> int:
lowercase__ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
lowercase__ : Union[str, Any] = self.get_dummy_components()
lowercase__ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
lowercase__ : Any = self.get_dummy_inputs(_A )
lowercase__ : Optional[Any] = 2
lowercase__ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowercase__ : Tuple = getattr(_A , scheduler_enum.name )
lowercase__ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
lowercase__ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : int = torch.manual_seed(33 )
lowercase__ : Optional[int] = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowercase__ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase__ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
lowercase__ : str = pipe(_A , generator=_A , output_type='''latent''' ).images
lowercase__ : Optional[Any] = upscaler(
prompt=_A , image=_A , num_inference_steps=20 , guidance_scale=0 , generator=_A , output_type='''np''' , ).images[0]
lowercase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def _lowerCAmelCase( self ) -> str:
lowercase__ : int = torch.manual_seed(33 )
lowercase__ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase__ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
lowercase__ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
lowercase__ : str = upscaler(
prompt=_A , image=_A , num_inference_steps=20 , guidance_scale=0 , generator=_A , output_type='''np''' , ).images[0]
lowercase__ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 198 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_chinese_clip'''] = ['''ChineseCLIPFeatureExtractor''']
    _import_structure['''image_processing_chinese_clip'''] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_chinese_clip'''] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
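# Illustrative effect of the lazy module above: importing the package is cheap,
# and the torch-backed classes are only materialized on first attribute access,
# e.g. (hypothetical checkpoint id):
#   from transformers import ChineseCLIPModel
#   model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")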
| 18 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
__version__ = "2020.9.26"
__author__ = "xcodz-dot, cclaus, dhruvmanila"

def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane with a simple perspective divide."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y

def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis by the given angle."""
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    # Angle conversion as in the original module (not a plain degrees-to-radians step).
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
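# Small sanity check (illustrative): with a fixed scale and camera distance,
# the deeper point projects closer to the origin than the near one.
def _demo_perspective():
    near = convert_to_2d(1.0, 1.0, 1.0, 10.0, 10.0)
    far = convert_to_2d(1.0, 1.0, 21.0, 10.0, 10.0)
    assert abs(far[0]) < abs(near[0]) and abs(far[1]) < abs(near[1])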
| 224 | import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    """Map T5X parameter names onto the HF SwitchTransformers naming scheme."""
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)
        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                # Split the stacked expert tensor into one entry per expert.
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
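# Toy illustration of the renaming above on a single hypothetical T5X-style key:
def _demo_rename_keys():
    toy = {"encoder/layers_0/attention/key/kernel": 0}
    renamed = rename_keys(toy)
    assert "encoder/block/0/layer/0/SelfAttention/k/kernel" in renamed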
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse hyper-parameters out of a T5X gin config and build a SwitchTransformersConfig."""
    import regex as re
    with open(gin_file, "r") as f:
        raw_gin = f.read()
    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)
    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config

def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    """Load a T5X checkpoint, rename its weights, and save a PyTorch SwitchTransformers model."""
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
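# Sketch of the gin parsing step above on an in-memory string (hypothetical gin
# contents), showing how the pattern `(.*) = ([0-9.]*)` pulls out numeric fields:
def _demo_gin_regex():
    raw_gin = "NUM_HEADS = 12\nMLP_DIM = 3072\n"
    assert re.findall(r"(.*) = ([0-9.]*)", raw_gin) == [("NUM_HEADS", "12"), ("MLP_DIM", "3072")]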
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 18 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def squared_euclidean_distance(a, b) -> np.ndarray:
    """Pairwise squared Euclidean distances between the rows of a and the rows of b."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d

def color_quantize(x, clusters) -> np.ndarray:
    """Assign each pixel to the index of its nearest color cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
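# Tiny illustration of the quantization path (assumed 2-color palette): each
# pixel is mapped to the index of its nearest cluster in RGB space.
def _demo_color_quantize():
    clusters = np.array([[0, 0, 0], [255, 255, 255]])  # black, white
    pixels = np.array([[[10, 10, 10], [250, 240, 245]]])  # one dark, one light pixel
    assert color_quantize(pixels, clusters).tolist() == [0, 1]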
class a_ ( A__ ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self , A = None , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = True , **A , ) -> List[Any]:
super().__init__(**_A )
_SCREAMING_SNAKE_CASE = size if size is not None else {"height": 256, "width": 256}
_SCREAMING_SNAKE_CASE = get_size_dict(_A )
_SCREAMING_SNAKE_CASE = np.array(_A ) if clusters is not None else None
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = do_color_quantize
def snake_case_( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' )
return resize(
_A , size=(size["""height"""], size["""width"""]) , resample=_A , data_format=_A , **_A )
def snake_case_( self , A , A = None , ) -> Any:
_SCREAMING_SNAKE_CASE = rescale(image=_A , scale=1 / 127.5 , data_format=_A )
_SCREAMING_SNAKE_CASE = image - 1
return image
def snake_case_( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(_A )
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_SCREAMING_SNAKE_CASE = clusters if clusters is not None else self.clusters
_SCREAMING_SNAKE_CASE = np.array(_A )
_SCREAMING_SNAKE_CASE = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(_A ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=_A ) for image in images]
if do_color_quantize:
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_A , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_SCREAMING_SNAKE_CASE = np.array(_A )
_SCREAMING_SNAKE_CASE = color_quantize(_A , _A ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_SCREAMING_SNAKE_CASE = images.shape[0]
_SCREAMING_SNAKE_CASE = images.reshape(_A , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_SCREAMING_SNAKE_CASE = list(_A )
else:
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_A , _A ) for image in images]
_SCREAMING_SNAKE_CASE = {"input_ids": images}
return BatchFeature(data=_A , tensor_type=_A )
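# --- Usage sketch (an illustration, not part of the upstream module) ---
# Demonstrates the color-quantization path above with a hypothetical 8-color
# palette; real ImageGPT checkpoints ship a 512-entry palette.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    palette = rng.integers(0, 256, size=(8, 3)).astype(np.float32)  # hypothetical clusters
    image = rng.integers(0, 256, size=(4, 4, 3)).astype(np.float32)  # fake 4x4 RGB image
    token_ids = color_quantize(image, palette)  # one palette index per pixel
    print(token_ids.shape)  # (16,) -> height * width tokens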
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) for x given in degrees via a truncated Maclaurin series."""
    # Wrap the angle into [0, 360) degrees
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
    __import__("doctest").testmod()
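    # Illustrative sanity check (an addition, not in the original file): the
    # truncated Maclaurin series above should track math.sin for wrapped angles.
    import math

    for deg in (0.0, 30.0, 90.0, 270.0):
        assert abs(sin(deg) - math.sin(math.radians(deg))) < 1e-9, deg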
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path: Path) -> bool:
    """Check that every custom extension file is present in the given tree."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
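# Illustrative invocations (assuming this script lives at utils/check_build.py):
#   python utils/check_build.py              # check the freshly built release in build/lib
#   python utils/check_build.py --check_lib  # check the installed transformers package instead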
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Compute num! recursively; lru_cache memoizes every intermediate result."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
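    # Illustrative cache demonstration (an addition): lru_cache memoizes every
    # intermediate factorial, so the second call below reuses the cached 5!.
    assert factorial(5) == 120
    factorial(6)  # only one new frame; factorial(5) is a cache hit
    print(factorial.cache_info())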
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    """Load weights from a TF 2.x checkpoint into the PyTorch model."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output LayerNorm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm (unreachable: the _output_layer_norm branch above already matches)
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF 2.x BERT checkpoint into a PyTorch state dict."""
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
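# Example invocation (illustrative script name and paths):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_ckpt/bert_model.ckpt \
#       --bert_config_file ./tf2_ckpt/bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin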
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`; record even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (node 1) to populate `cuts`."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
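# For the sample tree above this prints 2: removing the edges (1, 3) and (1, 6)
# leaves the even-sized components {3, 4}, {6, 8, 9, 10} and {1, 2, 5, 7}.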
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained Transformer-XL model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
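# Example invocation (illustrative script name and paths; either conversion
# mode may be used on its own, since both --tf_checkpoint_path and
# --transfo_xl_dataset_file default to empty strings):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo_xl_pt \
#       --transfo_xl_dataset_file ./wikitext-103/cache.pkl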
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
    main()
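# Installed as the `accelerate` console entry point, so invocations such as
# `accelerate env` or `accelerate launch train.py` dispatch through the
# sub-command parsers registered in main() above.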