"""Bilateral filtering of a grayscale image, implemented with OpenCV and NumPy."""
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
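# A minimal usage sketch for bilateral_filter above, avoiding the OpenCV GUI;
# the image shape and variance values here are illustrative assumptions, not
# taken from the original script.
import numpy as np

_noisy = np.clip(np.random.default_rng(0).normal(0.5, 0.1, (32, 32)), 0.0, 1.0).astype("float32")
_smoothed = bilateral_filter(_noisy, 1.0, 1.0, 5)  # spatial var, intensity var, kernel size
assert _smoothed.shape == _noisy.shape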
"""Tests for the `datasets` parallel backend (joblibspark)."""
import pytest

from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested

from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows


def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
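# A pure-Python sketch of the behaviour the assertions above rely on: map_nested
# applies a function to every leaf of a nested list/dict structure. This is an
# illustration, not the datasets-internal implementation.
def _map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [_map_nested_sketch(fn, v) for v in data]
    return fn(data)  # leaf value


assert _map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}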
"""Dummy objects raised in place of sentencepiece-backed tokenizers.

The original file defines a few dozen placeholder classes, one per
sentencepiece-backed tokenizer (e.g. AlbertTokenizer, BarthezTokenizer, ...).
The class names were stripped from this sample and cannot be recovered, so a
single representative placeholder is kept below; every class in the original
file repeats exactly this pattern.
"""
from ..utils import DummyObject, requires_backends


class SentencePieceTokenizerPlaceholder(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
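# An illustrative mirror of the DummyObject mechanism (simplified, not the
# transformers implementation): any public attribute access on the placeholder
# class raises an ImportError naming the missing backend.
class _RequiresSentencePiece(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        raise ImportError("This class requires the `sentencepiece` library to be installed.")


class _PlaceholderTokenizer(metaclass=_RequiresSentencePiece):
    pass


try:
    _PlaceholderTokenizer.from_pretrained  # any public attribute triggers the error
except ImportError as _err:
    print(_err)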
"""MobileNetV2 model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
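# A short usage sketch for the configuration above (assumes a transformers
# installation that ships MobileNetV2):
from transformers import MobileNetV2Config

_cfg = MobileNetV2Config(depth_multiplier=1.4)  # e.g. the 1.4-width variant
assert _cfg.model_type == "mobilenet_v2" and _cfg.hidden_act == "relu6"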
"""Convert an original microsoft/unilm WavLM checkpoint to the transformers format."""
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
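# Example invocation of the conversion script above (the file names below are
# hypothetical placeholders):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted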
"""Data2VecText model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
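# A short usage sketch for the configuration above (assumes a transformers
# installation that ships Data2VecText):
from transformers import Data2VecTextConfig

_cfg = Data2VecTextConfig()
assert _cfg.model_type == "data2vec-text" and _cfg.hidden_size == 768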
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term, creating a "unit" first column.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
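# Quick sanity checks (illustrative): the 1x1 case follows directly from the
# early-return branch of solve_simultaneous, and numpy gives a reference
# solution for the 5x5 system used in __main__ above.
import numpy as np

assert solve_simultaneous([[4, 2]]) == [0.5]
_eq = np.array(
    [[2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8]],
    dtype=float,
)
print(np.linalg.solve(_eq[:, :-1], _eq[:, -1]))  # reference solution for comparison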
"""BERT with PABEE (Patience-based Early Exit) classification heads."""
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)


logger = logging.getLogger(__name__)


class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)

        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0

        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
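# A minimal sketch of the PABEE early-exit rule used in the forward pass above,
# detached from the model code (illustrative only): inference stops once
# `patience` consecutive per-layer heads agree on the prediction.
def _pabee_exit_layer(per_layer_predictions, patience):
    patient_counter, previous = 0, None
    for layer_idx, pred in enumerate(per_layer_predictions):
        if previous is not None and pred == previous:
            patient_counter += 1
        else:
            patient_counter = 0
        previous = pred
        if patient_counter == patience:
            return layer_idx  # exit here; deeper layers are skipped
    return len(per_layer_predictions) - 1  # no early agreement: use the last head


assert _pabee_exit_layer([2, 0, 1, 1, 1, 1], patience=3) == 5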
"""Tests for the Kandinsky 2.2 text-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=zero_image_embeds,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
"""Tests for the PyTorch DPT (hybrid) model."""
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
"""GPT-NeoX package `__init__` with a lazy import structure."""
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
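# A simplified illustration of the lazy-module idea used above (not the actual
# transformers _LazyModule): attribute access triggers the real import on first
# use, so importing the package itself stays cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that defines it
        self._module_for_attr = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._module_for_attr[attr])
        return getattr(module, attr)


assert _LazyModuleSketch("m", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'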
"""Download configuration dataclass.

Field names were stripped from this sample; they are restored below to match
`datasets.DownloadConfig`, whose field count and defaults this sample follows.
"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
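# A small illustration of why copy() deep-copies every field: mutating a dict
# field on the copy must not leak back into the original (field names follow
# the reconstruction above).
_cfg = DownloadConfig(proxies={"https": "http://localhost:3128"})
_cfg_copy = _cfg.copy()
_cfg_copy.proxies["https"] = "http://elsewhere:3128"
assert _cfg.proxies["https"] == "http://localhost:3128"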
"""Convert a fairseq M2M-100 checkpoint to the transformers format."""
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
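# Example invocation of the conversion script above (file names are
# hypothetical placeholders):
#   python convert_m2m100_checkpoint.py ./m2m100_last_checkpoint.pt ./m2m100-converted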
"""Integration tests for the VersatileDiffusion text-to-image pipeline."""
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def __UpperCAmelCase ( _UpperCAmelCase : int = 8 , _UpperCAmelCase : int | None = None ) -> str:
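    # BB84: Alice encodes random bits in random bases, Bob measures in random bases,
    # and the positions where their bases match form the sifted key.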
__snake_case = np.random.default_rng(seed=_UpperCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
__snake_case = 6 * key_len
# Measurement basis for Alice's qubits.
__snake_case = rng.integers(2 , size=_UpperCAmelCase )
# The set of states Alice will prepare.
__snake_case = rng.integers(2 , size=_UpperCAmelCase )
# Measurement basis for Bob's qubits.
__snake_case = rng.integers(2 , size=_UpperCAmelCase )
# Quantum Circuit to simulate BB84
__snake_case = qiskit.QuantumCircuit(_UpperCAmelCase , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_UpperCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_UpperCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_UpperCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_UpperCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_UpperCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
__snake_case = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
__snake_case = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1 , seed_simulator=_UpperCAmelCase )
# Returns the result of measurement.
__snake_case = job.result().get_counts(_UpperCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
__snake_case = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
__snake_case = gen_key[:key_len] if len(_UpperCAmelCase ) >= key_len else gen_key.ljust(_UpperCAmelCase , "0" )
return key
if __name__ == "__main__":
print(F'''The generated key is : {bbaa(8, seed=0)}''')
from doctest import testmod
testmod()
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
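    # Persist an FSDP-wrapped model in one of three layouts: FULL (single file written by
    # rank 0), LOCAL (one file per rank), or SHARDED (a distributed checkpoint directory).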
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a : int = sys.version_info >= (3, 10)
def __UpperCAmelCase ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Union[str, Any]=None ) -> List[str]:
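    # Dataclass helper: mutable list defaults must be supplied via default_factory,
    # so wrap the given default in a lambda.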
return field(default_factory=lambda: default , metadata=_UpperCAmelCase )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """titi"""
__SCREAMING_SNAKE_CASE = """toto"""
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """titi"""
__SCREAMING_SNAKE_CASE = """toto"""
__SCREAMING_SNAKE_CASE = 42
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = """toto"""
def A ( self : Dict ):
"""simple docstring"""
__snake_case = BasicEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = """toto"""
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = MixedTypeEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[] )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[1, 2, 3] )
__SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
__SCREAMING_SNAKE_CASE = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = field()
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = BasicEnum(self.required_enum )
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = field()
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default="""toto""" , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = list_field(default=[] )
__SCREAMING_SNAKE_CASE = list_field(default=[] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] , a_ : argparse.ArgumentParser , a_ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__snake_case = {k: v for k, v in vars(a_ ).items() if k != "container"}
__snake_case = {k: v for k, v in vars(a_ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , a_ ) and yy.get("choices" , a_ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](a_ ) , yy["type"](a_ ) )
del xx["type"], yy["type"]
self.assertEqual(a_ , a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a_ , required=a_ )
expected.add_argument("--bar" , type=a_ , required=a_ )
expected.add_argument("--baz" , type=a_ , required=a_ )
expected.add_argument("--flag" , type=a_ , default=a_ , const=a_ , nargs="?" )
self.argparsersEqual(a_ , a_ )
__snake_case = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((__snake_case ) , ) = parser.parse_args_into_dataclasses(a_ , look_for_args_file=a_ )
self.assertFalse(example.flag )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=a_ )
expected.add_argument("--baz" , default="toto" , type=a_ , help="help message" )
self.argparsersEqual(a_ , a_ )
def A ( self : Any ):
"""simple docstring"""
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a_ , default=a_ , const=a_ , nargs="?" )
expected.add_argument("--baz" , type=a_ , default=a_ , const=a_ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=a_ , dest="baz" )
expected.add_argument("--opt" , type=a_ , default=a_ )
__snake_case = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a_ )
for dataclass_type in dataclass_types:
__snake_case = HfArgumentParser(a_ )
self.argparsersEqual(a_ , a_ )
__snake_case = parser.parse_args([] )
self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) )
__snake_case = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) )
__snake_case = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) )
__snake_case = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) )
__snake_case = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(a_ , Namespace(foo=a_ , baz=a_ , opt=a_ ) )
def A ( self : str ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a_ , a_ )
__snake_case = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
__snake_case = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__snake_case = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
__snake_case = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__snake_case = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
__snake_case = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def A ( self : Any ):
"""simple docstring"""
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = """toto"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(a_ , a_ )
__snake_case = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
__snake_case = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
__snake_case = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=a_ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=a_ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a_ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=a_ )
self.argparsersEqual(a_ , a_ )
__snake_case = parser.parse_args([] )
self.assertEqual(
a_ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
__snake_case = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(a_ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo" , default=a_ , type=a_ )
expected.add_argument("--bar" , default=a_ , type=a_ , help="help message" )
expected.add_argument("--baz" , default=a_ , type=a_ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=a_ )
expected.add_argument("--des" , nargs="+" , default=[] , type=a_ )
__snake_case = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(a_ )
for dataclass_type in dataclass_types:
__snake_case = HfArgumentParser(a_ )
self.argparsersEqual(a_ , a_ )
__snake_case = parser.parse_args([] )
self.assertEqual(a_ , Namespace(foo=a_ , bar=a_ , baz=a_ , ces=[] , des=[] ) )
__snake_case = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(a_ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def A ( self : int ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=a_ , required=a_ )
expected.add_argument("--required_str" , type=a_ , required=a_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a_ , )
self.argparsersEqual(a_ , a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = argparse.ArgumentParser()
expected.add_argument("--foo" , type=a_ , required=a_ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a_ , )
expected.add_argument("--opt" , type=a_ , default=a_ )
expected.add_argument("--baz" , default="toto" , type=a_ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a_ )
self.argparsersEqual(a_ , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
__snake_case = parser.parse_dict(a_ )[0]
__snake_case = BasicExample(**a_ )
self.assertEqual(a_ , a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(a_ , parser.parse_dict , a_ , allow_extra_keys=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "temp_json" )
os.mkdir(a_ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(a_ , a_ )
__snake_case = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
__snake_case = BasicExample(**a_ )
self.assertEqual(a_ , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
__snake_case = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case = os.path.join(a_ , "temp_yaml" )
os.mkdir(a_ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(a_ , a_ )
__snake_case = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
__snake_case = BasicExample(**a_ )
self.assertEqual(a_ , a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = HfArgumentParser(a_ )
self.assertIsNotNone(a_ )
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from PIL import Image
def __UpperCAmelCase ( _UpperCAmelCase : Image , _UpperCAmelCase : int ) -> Image:
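    # Standard contrast-correction factor; pixel values are remapped around the midpoint 128.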
__snake_case = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(_UpperCAmelCase : int ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(_UpperCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
a : Dict = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
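    # For negative n with k = |n|.bit_length(): the low k bits of 2**k - |n| spell the
    # complement, and a leading 1 supplies the sign bit.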
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( _UpperCAmelCase : list[float] , _UpperCAmelCase : list[float] ) -> float:
__snake_case = sorted(numsa + numsa )
__snake_case , __snake_case = divmod(len(_UpperCAmelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Tuple = [float(x) for x in input('''Enter the elements of first array: ''').split()]
a : List[Any] = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
a : Dict = 6_378_137.0
a : Union[str, Any] = 6_356_752.314_245
a : Any = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
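    # Haversine great-circle distance, evaluated on parametric (reduced) latitudes
    # to partially account for the Earth's flattening.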
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = radians(_UpperCAmelCase )
__snake_case = radians(_UpperCAmelCase )
# Equation
__snake_case = sin((phi_a - phi_a) / 2 )
__snake_case = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__snake_case = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
from math import sqrt
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> bool:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
__snake_case = True
# 0 and 1 are none primes.
if number <= 1:
__snake_case = False
for divisor in range(2 , int(round(sqrt(_UpperCAmelCase ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__snake_case = False
break
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'status' must been from type bool"
return status
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Dict:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__snake_case = list(range(2 , n + 1 ) )
__snake_case = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(_UpperCAmelCase ) ):
for j in range(i + 1 , len(_UpperCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__snake_case = 0
# filters actual prime numbers.
__snake_case = [x for x in begin_list if x != 0]
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Any:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n > 2), "'N' must been an int and > 2"
__snake_case = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(_UpperCAmelCase ):
ans.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> List[str]:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and number >= 0, "'number' must been an int and >= 0"
__snake_case = [] # this list will be returns of the function.
# potential prime number factors.
__snake_case = 2
__snake_case = number
if number == 0 or number == 1:
ans.append(_UpperCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(_UpperCAmelCase ):
while quotient != 1:
if is_prime(_UpperCAmelCase ) and (quotient % factor == 0):
ans.append(_UpperCAmelCase )
quotient /= factor
else:
factor += 1
else:
ans.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type list"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Any:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__snake_case = 0
# prime factorization of 'number'
__snake_case = prime_factorization(_UpperCAmelCase )
__snake_case = max(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Any:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
__snake_case = 0
# prime factorization of 'number'
__snake_case = prime_factorization(_UpperCAmelCase )
__snake_case = min(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'ans' must been from type int"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Union[str, Any]:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , _UpperCAmelCase ), "compare must been from type bool"
return number % 2 == 0
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> int:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , _UpperCAmelCase ), "compare must been from type bool"
return number % 2 != 0
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> int:
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (number > 2) and is_even(_UpperCAmelCase )
), "'number' must been an int, even and > 2"
__snake_case = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__snake_case = get_prime_numbers(_UpperCAmelCase )
__snake_case = len(_UpperCAmelCase )
# run variable for while-loops.
__snake_case = 0
__snake_case = None
# exit variable. for break up the loops
__snake_case = True
while i < len_pn and loop:
__snake_case = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__snake_case = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (len(_UpperCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> int:
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__snake_case = 0
while numbera != 0:
__snake_case = numbera % numbera
__snake_case = numbera
__snake_case = rest
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> List[str]:
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__snake_case = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__snake_case = prime_factorization(_UpperCAmelCase )
__snake_case = prime_factorization(_UpperCAmelCase )
elif numbera == 1 or numbera == 1:
__snake_case = []
__snake_case = []
__snake_case = max(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = 0
__snake_case = 0
__snake_case = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__snake_case = prime_fac_a.count(_UpperCAmelCase )
__snake_case = prime_fac_a.count(_UpperCAmelCase )
for _ in range(max(_UpperCAmelCase , _UpperCAmelCase ) ):
ans *= n
else:
__snake_case = prime_fac_a.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__snake_case = prime_fac_a.count(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
ans *= n
done.append(_UpperCAmelCase )
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] ) -> Any:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'number' must been a positive int"
__snake_case = 0
__snake_case = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(_UpperCAmelCase ):
ans += 1
# precondition
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and is_prime(
_UpperCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
assert (
is_prime(_UpperCAmelCase ) and is_prime(_UpperCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__snake_case = p_number_a + 1 # jump to the next number
__snake_case = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
while number < p_number_a:
ans.append(_UpperCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(_UpperCAmelCase ):
number += 1
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and ans[0] != p_number_a
and ans[len(_UpperCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> str:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
__snake_case = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(_UpperCAmelCase )
# precondition
    assert ans[0] == 1 and ans[len(_UpperCAmelCase ) - 1] == n, "Error in function get_divisors(...)"
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[int]:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
__snake_case = get_divisors(_UpperCAmelCase )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (divisors[0] == 1)
and (divisors[len(_UpperCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__snake_case = gcd(abs(_UpperCAmelCase ) , abs(_UpperCAmelCase ) )
# precondition
assert (
isinstance(_UpperCAmelCase , _UpperCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> Optional[Any]:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
__snake_case = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> Optional[Any]:
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
__snake_case = 0
__snake_case = 1
__snake_case = 1 # this will be return
for _ in range(n - 1 ):
__snake_case = ans
ans += fiba
__snake_case = tmp
return ans
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
a : Optional[int] = logging.get_logger(__name__)
a : Dict[Optional[str], Type[Formatter]] = {}
a : Dict[Optional[str], str] = {}
a : Dict[Optional[str], Exception] = {}
def __UpperCAmelCase ( _UpperCAmelCase : type , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[List[str]] = None , ) -> str:
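    # Register a Formatter class under its canonical format type and any aliases,
    # warning when an existing registration is overwritten.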
__snake_case = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' )
__snake_case = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' )
__snake_case = format_type
def __UpperCAmelCase ( _UpperCAmelCase : Exception , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[List[str]] = None ) -> int:
__snake_case = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
__snake_case = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
a : List[Any] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
a : str = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
a : Any = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def __UpperCAmelCase ( _UpperCAmelCase : Optional[str] ) -> Optional[str]:
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __UpperCAmelCase ( _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : int ) -> Formatter:
__snake_case = get_format_type_from_alias(_UpperCAmelCase )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_UpperCAmelCase )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
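    # is_small_dataset should be True only when both sizes are known and the
    # dataset fits under IN_MEMORY_MAX_SIZE.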
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : List[str] = logging.get_logger(__name__)
a : Union[str, Any] = '''T5Config'''
def __UpperCAmelCase ( _UpperCAmelCase : jnp.array , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> jnp.ndarray:
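    # Shift input ids one step to the right, prepend the decoder start token, and
    # replace label padding (-100) with the pad token id.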
__snake_case = jnp.zeros_like(_UpperCAmelCase )
__snake_case = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
__snake_case = shifted_input_ids.at[:, 0].set(_UpperCAmelCase )
__snake_case = jnp.where(shifted_input_ids == -1_00 , _UpperCAmelCase , _UpperCAmelCase )
return shifted_input_ids
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mt5"""
__SCREAMING_SNAKE_CASE = MTaConfig
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
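    # Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge**2.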
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
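    # Volume of a regular dodecahedron: ((15 + 7*sqrt(5)) / 4) * edge**3.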
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """gpt_bigcode"""
__SCREAMING_SNAKE_CASE = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] , a_ : Optional[int]=50_257 , a_ : List[str]=1_024 , a_ : List[Any]=768 , a_ : Tuple=12 , a_ : Any=12 , a_ : Any=None , a_ : Any="gelu_pytorch_tanh" , a_ : List[Any]=0.1 , a_ : List[Any]=0.1 , a_ : int=0.1 , a_ : Any=1e-5 , a_ : List[str]=0.02 , a_ : str=True , a_ : Optional[int]=True , a_ : Tuple=50_256 , a_ : List[str]=50_256 , a_ : Union[str, Any]=True , a_ : int=True , a_ : Dict=True , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = n_positions
__snake_case = n_embd
__snake_case = n_layer
__snake_case = n_head
__snake_case = n_inner
__snake_case = activation_function
__snake_case = resid_pdrop
__snake_case = embd_pdrop
__snake_case = attn_pdrop
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = scale_attn_weights
__snake_case = use_cache
__snake_case = attention_softmax_in_fpaa
__snake_case = scale_attention_softmax_in_fpaa
__snake_case = multi_query
__snake_case = bos_token_id
__snake_case = eos_token_id
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
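    # Lambert's formula for short lines: start from the haversine central angle,
    # then apply the flattening corrections X and Y below.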
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
    __snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a : List[str] = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
    # Apply the Gaussian function to each element of the matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
    # Creates a Gaussian kernel of the given size.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
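# Illustrative invocation (argument order matches parse_args above; the script
# and image names are examples, not from this file):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# i.e. <image path> <spatial variance> <intensity variance> <kernel size>,
# where an even kernel size is bumped to the next odd value.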
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray ) -> np.ndarray:
return 1 / (1 + np.exp(-vector ))
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray ) -> np.ndarray:
return vector * sigmoid(_UpperCAmelCase )
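# Self-contained sanity check (the helper name below is illustrative, not from
# this file; it mirrors the anonymized definitions above):
def _sigmoid_demo(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


assert float(_sigmoid_demo(np.array([0.0]))[0]) == 0.5  # sigmoid(0) is exactly 0.5
assert float((np.array([0.0]) * _sigmoid_demo(np.array([0.0])))[0]) == 0.0  # swish(0) is 0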
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
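# Greedy note: the accessors above (anonymized as `A`) expose value, name,
# weight, and the value/weight ratio; sorting descending by the ratio is the
# classic fractional-knapsack heuristic, though it is not guaranteed optimal
# for the 0/1 selection performed here.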
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = 5
# Realm tok
__snake_case = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__snake_case = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(a_ , exist_ok=a_ )
__snake_case = os.path.join(a_ , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__snake_case = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(a_ , exist_ok=a_ )
def A ( self : int ):
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def A ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = RealmConfig(num_block_records=self.num_block_records )
return config
def A ( self : Any ):
"""simple docstring"""
__snake_case = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def A ( self : int ):
"""simple docstring"""
__snake_case = np.array(
[
B"This is the first record",
B"This is the second record",
B"This is the third record",
B"This is the fourth record",
B"This is the fifth record",
B"This is a longer longer longer record",
] , dtype=a_ , )
return block_records
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.get_config()
__snake_case = self.get_dummy_retriever()
__snake_case = retriever.tokenizer
__snake_case = np.array([0, 3] , dtype="long" )
__snake_case = tokenizer(["Test question"] ).input_ids
__snake_case = tokenizer(
["the fourth"] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids
__snake_case = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case = retriever(
a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors="np" )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(len(a_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_config()
__snake_case = self.get_dummy_retriever()
__snake_case = retriever.tokenizer
__snake_case = np.array([0, 3, 5] , dtype="long" )
__snake_case = tokenizer(["Test question"] ).input_ids
__snake_case = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , ).input_ids
__snake_case = config.reader_seq_len
__snake_case , __snake_case , __snake_case , __snake_case = retriever(
a_ , a_ , answer_ids=a_ , max_length=a_ , return_tensors="np" )
self.assertEqual([False, True, True] , a_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
__snake_case = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
__snake_case = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
__snake_case = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , B"This is the first record" )
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
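# Self-contained check of the log trick (example values are mine): comparing
# exponent * log10(base) preserves the ordering of base ** exponent without
# ever materializing the huge integers.
from math import log10

assert 3 * log10(8) > 2 * log10(9)  # 8 ** 3 == 512 beats 9 ** 2 == 81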
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
import math
import os
import sys
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = ""
try:
with open(_UpperCAmelCase , "rb" ) as binary_file:
__snake_case = binary_file.read()
for dat in data:
__snake_case = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( _UpperCAmelCase : dict[str, str] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : str ) -> None:
lexicon.pop(_UpperCAmelCase )
__snake_case = last_match_id
if math.loga(_UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case = "0" + lexicon[curr_key]
__snake_case = bin(_UpperCAmelCase )[2:]
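# Lexicon growth note: each matched prefix is split into two children
# (prefix + "0" and prefix + "1"), and every stored code word gains a leading
# "0" whenever the running index hits a power of two, so all ids keep the
# current bit width and stay unambiguous as the lexicon grows.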
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = {"0": "0", "1": "1"}
__snake_case , __snake_case = "", ""
__snake_case = len(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
index += 1
__snake_case = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case = lexicon[curr_string]
result += last_match_id
return result
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
__snake_case = os.path.getsize(_UpperCAmelCase )
__snake_case = bin(_UpperCAmelCase )[2:]
__snake_case = len(_UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
__snake_case = 8
try:
with open(_UpperCAmelCase , "wb" ) as opened_file:
__snake_case = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
__snake_case = read_file_binary(_UpperCAmelCase )
__snake_case = compress_data(_UpperCAmelCase )
__snake_case = add_file_length(_UpperCAmelCase , _UpperCAmelCase )
write_file_binary(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Optional[Any] , a_ : Any ):
"""simple docstring"""
__snake_case = data
def __iter__( self : int ):
"""simple docstring"""
for element in self.data:
yield element
def A__ ( _UpperCAmelCase : Dict=True ) -> List[Any]:
__snake_case = Accelerator(even_batches=_UpperCAmelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def A__ ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ) -> Optional[Any]:
if iterable:
__snake_case = DummyIterableDataset(torch.as_tensor(range(_UpperCAmelCase ) ) )
else:
__snake_case = TensorDataset(torch.as_tensor(range(_UpperCAmelCase ) ) )
__snake_case = DataLoader(_UpperCAmelCase , batch_size=_UpperCAmelCase )
__snake_case = accelerator.prepare(_UpperCAmelCase )
return dl
def A__ ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : List[int] , ) -> Optional[int]:
__snake_case = create_dataloader(accelerator=_UpperCAmelCase , dataset_size=_UpperCAmelCase , batch_size=_UpperCAmelCase )
__snake_case = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def A__ ( ) -> List[Any]:
__snake_case = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def A__ ( ) -> Tuple:
__snake_case = create_accelerator(even_batches=_UpperCAmelCase )
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
_UpperCAmelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def A__ ( ) -> Union[str, Any]:
__snake_case = create_accelerator(even_batches=_UpperCAmelCase )
__snake_case = torch.nn.Linear(1 , 1 )
__snake_case = accelerator.prepare(_UpperCAmelCase )
__snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
__snake_case = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(_UpperCAmelCase ):
__snake_case = ddp_model(batch[0].float() )
__snake_case = output.sum()
loss.backward()
batch_idxs.append(_UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def A__ ( _UpperCAmelCase : Optional[int] ) -> List[str]:
with warnings.catch_warnings(record=_UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , _UpperCAmelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def A__ ( ) -> List[str]:
__snake_case = True
__snake_case = False
__snake_case = create_accelerator(even_batches=_UpperCAmelCase )
__snake_case = torch.nn.Linear(1 , 1 )
__snake_case = accelerator.prepare(_UpperCAmelCase )
__snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
__snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
__snake_case = train_dl.batch_sampler.even_batches
__snake_case = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def A__ ( ) -> List[str]:
__snake_case = True
__snake_case = False
__snake_case = create_accelerator(even_batches=_UpperCAmelCase )
__snake_case = torch.nn.Linear(1 , 1 )
__snake_case = accelerator.prepare(_UpperCAmelCase )
create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase )
__snake_case = create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("ignore" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
__snake_case = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def A__ ( ) -> Optional[int]:
__snake_case = create_accelerator()
__snake_case = torch.nn.Linear(1 , 1 )
__snake_case = accelerator.prepare(_UpperCAmelCase )
create_dataloader(_UpperCAmelCase , dataset_size=3 , batch_size=1 , iterable=_UpperCAmelCase )
with warnings.catch_warnings(record=_UpperCAmelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=_UpperCAmelCase ):
pass
assert issubclass(w[-1].category , _UpperCAmelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def A__ ( ) -> Dict:
__snake_case = create_accelerator()
accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
test_default_ensures_even_batch_sizes()
accelerator.print("Run tests with even_batches disabled" )
test_can_disable_even_batches()
accelerator.print("Test joining uneven inputs" )
test_can_join_uneven_inputs()
accelerator.print("Test overriding even_batches when joining uneven inputs" )
test_join_can_override_even_batches()
accelerator.print("Test overriding even_batches for mixed dataloader types" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("Test join with non DDP distributed raises warning" )
__snake_case = accelerator.state.distributed_type
__snake_case = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(_UpperCAmelCase )
__snake_case = original_state
if __name__ == "__main__":
main()
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainer's args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
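# (Concretely, main() below builds this with itertools.product over the split
# variations, e.g. product(["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"])
# yields exactly the 6 combinations listed above.)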
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] ) -> int:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str=0 ) -> Optional[int]:
return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : x[column] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Any=float("inf" ) ) -> Any:
for i in range(points_counts - 1 ):
for j in range(i + 1 , _UpperCAmelCase ):
__snake_case = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__snake_case = current_dis
return min_dis
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int]=float("inf" ) ) -> Any:
for i in range(min(6 , points_counts - 1 ) , _UpperCAmelCase ):
for j in range(max(0 , i - 6 ) , _UpperCAmelCase ):
__snake_case = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__snake_case = current_dis
return min_dis
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> Optional[int]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(_UpperCAmelCase , _UpperCAmelCase )
# recursion
__snake_case = points_counts // 2
__snake_case = closest_pair_of_points_sqr(
_UpperCAmelCase , points_sorted_on_y[:mid] , _UpperCAmelCase )
__snake_case = closest_pair_of_points_sqr(
_UpperCAmelCase , points_sorted_on_y[mid:] , points_counts - mid )
__snake_case = min(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(_UpperCAmelCase )
__snake_case = dis_between_closest_in_strip(
_UpperCAmelCase , len(_UpperCAmelCase ) , _UpperCAmelCase )
return min(_UpperCAmelCase , _UpperCAmelCase )
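# Strip-check note: the strip helper above compares each point with at most 6
# neighbors in y-order inside the strip, which is what keeps this
# divide-and-conquer at O(n log n) instead of the O(n^2) brute force used for
# the base case.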
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Dict:
__snake_case = column_based_sort(_UpperCAmelCase , column=0 )
__snake_case = column_based_sort(_UpperCAmelCase , column=1 )
return (
closest_pair_of_points_sqr(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
) ** 0.5
if __name__ == "__main__":
a : List[Any] = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
import math
import sys
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = ""
try:
with open(_UpperCAmelCase , "rb" ) as binary_file:
__snake_case = binary_file.read()
for dat in data:
__snake_case = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = {"0": "0", "1": "1"}
__snake_case , __snake_case = "", ""
__snake_case = len(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case = lexicon[curr_string]
result += last_match_id
__snake_case = last_match_id + "0"
if math.loga(_UpperCAmelCase ).is_integer():
__snake_case = {}
for curr_key in list(_UpperCAmelCase ):
__snake_case = lexicon.pop(_UpperCAmelCase )
__snake_case = new_lex
__snake_case = last_match_id + "1"
index += 1
__snake_case = ""
return result
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
__snake_case = 8
try:
with open(_UpperCAmelCase , "wb" ) as opened_file:
__snake_case = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCAmelCase ) , _UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
__snake_case = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
__snake_case = data_bits[counter:]
__snake_case = data_bits[counter + 1 :]
return data_bits
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> None:
__snake_case = read_file_binary(_UpperCAmelCase )
__snake_case = remove_prefix(_UpperCAmelCase )
__snake_case = decompress_data(_UpperCAmelCase )
write_file_binary(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
a : Any = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
a : List[compression.BaseCompressedFileFileSystem] = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
if "://" in dataset_path:
__snake_case = dataset_path.split("://" )[1]
return dataset_path
def __UpperCAmelCase ( _UpperCAmelCase : fsspec.AbstractFileSystem ) -> bool:
if fs is not None and fs.protocol != "file":
return True
else:
return False
def __UpperCAmelCase ( _UpperCAmelCase : fsspec.AbstractFileSystem , _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
__snake_case = not is_remote_filesystem(_UpperCAmelCase )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(_UpperCAmelCase ) , fs._strip_protocol(_UpperCAmelCase ) )
else:
fs.mv(_UpperCAmelCase , _UpperCAmelCase , recursive=_UpperCAmelCase )
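# Design note: for a local filesystem fsspec's mv is a copy-then-delete, so
# the branch above prefers shutil.move (an in-place rename when possible);
# remote filesystems fall through to fs.mv with recursive semantics.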
def __UpperCAmelCase ( ) -> None:
if hasattr(fsspec.asyn , "reset_lock" ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
__snake_case = None
__snake_case = None
__snake_case = threading.Lock()
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
from string import ascii_uppercase
a : List[str] = {char: i for i, char in enumerate(ascii_uppercase)}
a : str = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
__snake_case = len(_UpperCAmelCase )
__snake_case = 0
while True:
if x == i:
__snake_case = 0
if len(_UpperCAmelCase ) == len(_UpperCAmelCase ):
break
key += key[i]
i += 1
return key
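# Key-stretching note: the loop above repeats the key cyclically until it is
# as long as the message, e.g. key "SECRET" for the 17-character message used
# in main() below becomes "SECRETSECRETSECRE".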
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
__snake_case = ""
__snake_case = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__snake_case = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
__snake_case = ""
__snake_case = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__snake_case = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( ) -> None:
__snake_case = "THE GERMAN ATTACK"
__snake_case = "SECRET"
__snake_case = generate_key(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = cipher_text(_UpperCAmelCase , _UpperCAmelCase )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(_UpperCAmelCase , _UpperCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
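The inference branch above interleaves the patience rule with the encoder loop; isolated, the rule is simply "stop once `patience` consecutive internal classifiers agree". A self-contained sketch of that stopping rule (function name and signature are mine, not part of the model class):
import torch
def patience_early_exit(per_layer_logits, patience):
    """Return (prediction, layers_used): exit as soon as `patience` consecutive
    layer classifiers produce the same argmax, mirroring the loop above."""
    counter, prev_pred, layers_used = 0, None, 0
    for logits in per_layer_logits:
        layers_used += 1
        pred = logits.detach().argmax(dim=1)
        if prev_pred is not None and torch.all(pred.eq(prev_pred)):
            counter += 1
        else:
            counter = 0
        prev_pred = pred
        if counter == patience:
            break
    return prev_pred, layers_used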
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = KandinskyVaaControlnetPipeline
__SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__SCREAMING_SNAKE_CASE = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__SCREAMING_SNAKE_CASE = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__SCREAMING_SNAKE_CASE = False
@property
def A ( self : str ):
"""simple docstring"""
return 32
@property
def A ( self : Any ):
"""simple docstring"""
return 32
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A ( self : Any ):
"""simple docstring"""
return 100
@property
def A ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = {
"in_channels": 8,
            # Out channels is double the in channels because the model predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image_hint",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
__snake_case = UNetaDConditionModel(**a_ )
return model
@property
def A ( self : Dict ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def A ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.dummy_unet
__snake_case = self.dummy_movq
__snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=a_ , set_alpha_to_one=a_ , steps_offset=1 , prediction_type="epsilon" , thresholding=a_ , )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A ( self : Optional[Any] , a_ : Dict , a_ : List[str]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a_ ) ).to(a_ )
__snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
a_ )
# create hint
__snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(a_ ) ).to(a_ )
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"hint": hint,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def A ( self : Any ):
"""simple docstring"""
__snake_case = "cpu"
__snake_case = self.get_dummy_components()
__snake_case = self.pipeline_class(**a_ )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case = output.images
__snake_case = pipe(
**self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Dict ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/hint_image_cat.png" )
__snake_case = torch.from_numpy(np.array(a_ ) ).float() / 255.0
__snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__snake_case = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(a_ )
__snake_case = KandinskyVaaControlnetPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
__snake_case = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
__snake_case = "A robot, 4k photo"
__snake_case = torch.Generator(device="cuda" ).manual_seed(0 )
__snake_case , __snake_case = pipe_prior(
a_ , generator=a_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__snake_case = torch.Generator(device="cuda" ).manual_seed(0 )
__snake_case = pipeline(
image_embeds=a_ , negative_image_embeds=a_ , hint=a_ , generator=a_ , num_inference_steps=100 , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(a_ , a_ )
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
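The initialization test above judges each parameter with ((param.data.mean() * 1e9).round() / 1e9).item(); pulled out, the check looks like the sketch below (helper name is mine):
import torch
def looks_zero_initialized(param: torch.Tensor) -> bool:
    """After _config_zero_init, a trainable parameter's mean, rounded at the
    1e-9 digit, should land exactly on 0.0 (zeroed weights) or 1.0 (e.g.
    norm scales that are initialized to one)."""
    rounded_mean = ((param.data.mean() * 1e9).round() / 1e9).item()
    return rounded_mean in (0.0, 1.0)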
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : int = Mock()
lowerCAmelCase : str = conn, Mock()
lowerCAmelCase : Union[str, Any] = iter([1, None] )
lowerCAmelCase : List[str] = lambda SCREAMING_SNAKE_CASE : next(SCREAMING_SNAKE_CASE )
# ===== invoke =====
send_file(filename="mytext.txt" , testing=SCREAMING_SNAKE_CASE )
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 681 |
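The mock assertions above pin down send_file's whole call sequence: bind, listen, accept, a single recv, chunked reads feeding conn.send, then the close/shutdown pair. A server sketch consistent with those assertions; the port and chunk size are assumptions.
import socket
def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    """One-shot file server matching the mocked call sequence in the test above."""
    sock = socket.socket()
    sock.bind(("localhost", 12312))  # port chosen for the sketch
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client's request before streaming
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # the test's iter([1, None]) drives exactly one send
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()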
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
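The shape assertions above encode ResNet's fixed downsampling schedule: the stem divides the input by 4 and each later stage halves it, so a four-stage network ends at image_size // 32. A small sketch of that arithmetic:
def resnet_stage_spatial_sizes(image_size: int, num_stages: int = 4) -> list:
    """Spatial side length after each stage: // 4 at the stem, then halved per stage."""
    sizes = [image_size // 4]
    for _ in range(num_stages - 1):
        sizes.append(sizes[-1] // 2)
    return sizes
assert resnet_stage_spatial_sizes(224) == [56, 28, 14, 7]  # final 7 == 224 // 32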
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
lowerCAmelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCAmelCase : int = model_type_to_module_name(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = importlib.import_module(f""".{module_name}""" , "transformers.models" )
try:
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE , "__name__" , SCREAMING_SNAKE_CASE ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCAmelCase : Optional[Any] = importlib.import_module("transformers" )
if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return None
def a__ ( SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , )
if resolved_config_file is None:
logger.info(
"Could not locate the feature extractor configuration file, will try to use the model config instead." )
return {}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as reader:
return json.load(SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
raise EnvironmentError(
"AutoFeatureExtractor is designed to be instantiated "
"using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(snake_case__ )
def lowercase__ ( cls , snake_case__ , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = kwargs.pop("config" , snake_case__ )
lowerCAmelCase : Optional[Any] = kwargs.pop("trust_remote_code" , snake_case__ )
lowerCAmelCase : Dict = True
lowerCAmelCase , lowerCAmelCase : Optional[Any] = FeatureExtractionMixin.get_feature_extractor_dict(snake_case__ , **snake_case__ )
lowerCAmelCase : Any = config_dict.get("feature_extractor_type" , snake_case__ )
lowerCAmelCase : Tuple = None
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
lowerCAmelCase : Tuple = config_dict["auto_map"]["AutoFeatureExtractor"]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : int = AutoConfig.from_pretrained(snake_case__ , **snake_case__ )
            # It could be in `config.feature_extractor_type`
lowerCAmelCase : Optional[Any] = getattr(snake_case__ , "feature_extractor_type" , snake_case__ )
if hasattr(snake_case__ , "auto_map" ) and "AutoFeatureExtractor" in config.auto_map:
lowerCAmelCase : Optional[Any] = config.auto_map["AutoFeatureExtractor"]
if feature_extractor_class is not None:
lowerCAmelCase : Dict = feature_extractor_class_from_name(snake_case__ )
lowerCAmelCase : Tuple = feature_extractor_auto_map is not None
lowerCAmelCase : Optional[int] = feature_extractor_class is not None or type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING
lowerCAmelCase : Dict = resolve_trust_remote_code(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if has_remote_code and trust_remote_code:
lowerCAmelCase : Any = get_class_from_dynamic_module(
snake_case__ , snake_case__ , **snake_case__ )
lowerCAmelCase : str = kwargs.pop("code_revision" , snake_case__ )
if os.path.isdir(snake_case__ ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(snake_case__ ) in FEATURE_EXTRACTOR_MAPPING:
lowerCAmelCase : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(snake_case__ )]
return feature_extractor_class.from_dict(snake_case__ , **snake_case__ )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowercase__ ( snake_case__ , snake_case__ ):
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(snake_case__ , snake_case__ )
| 681 |
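The resolution order implemented above (feature extractor config, then model config, then remote code, then the static mapping) is invisible to callers, who only touch from_pretrained. A minimal usage example against the real transformers class that this anonymized definition mirrors; the checkpoint id is illustrative.
from transformers import AutoFeatureExtractor
extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # resolves to Wav2Vec2FeatureExtractor via the mapping table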
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
while d % 2 == 0:
        d //= 2  # floor division keeps d an int for the modular exponentiation below
exp += 1
    # n - 1 = d * (2**exp)
lowerCAmelCase : Optional[Any] = 0
while count < prec:
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
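The inner loop above hand-rolls the repeated squaring; the same per-base witness check reads more directly with Python's three-argument pow. A sketch equivalent to one iteration of the count loop (function name is mine):
def is_witness(a: int, n: int) -> bool:
    """Return True if base `a` proves odd n > 2 composite (one Miller-Rabin round)."""
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    x = pow(a, d, n)  # a**d mod n via built-in modular exponentiation
    if x in (1, n - 1):
        return False  # base `a` says nothing: n may be prime
    for _ in range(exp - 1):
        x = x * x % n
        if x == n - 1:
            return False
    return True  # n is definitely composite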
"""simple docstring"""
from math import factorial, pi
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 3_0 ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
lowerCAmelCase : List[Any] = float(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 3_0 ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
lowerCAmelCase : str = float(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 681 |
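Both functions reduce theta into a single period before summing the truncated series, which keeps the high-order terms small for large angles. A quick sanity check against the standard library, assuming maclaurin_sin and maclaurin_cos are in scope under those names (as the __main__ block above also assumes):
import math
for theta in (0.5, 10.0, -10.0):
    assert abs(maclaurin_sin(theta, 30) - math.sin(theta)) < 1e-9
    assert abs(maclaurin_cos(theta, 30) - math.cos(theta)) < 1e-9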
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formulas (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
        # hacks - these were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                "or `v_prediction` for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 681 | 1 |
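The `fixed_small` branch above implements the posterior variance of formula (7) in the DDPM paper; stripped of the scheduler-state plumbing it is only a few lines (function name and signature are mine):
import jax.numpy as jnp
def ddpm_posterior_variance(alphas_cumprod: jnp.ndarray, betas: jnp.ndarray, t: int) -> jnp.ndarray:
    """beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t, clipped away from zero."""
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = jnp.where(t > 0, alphas_cumprod[t - 1], jnp.array(1.0))
    variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]
    return jnp.clip(variance, a_min=1e-20)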
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) )
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
if dataset.ndim != value_array.ndim:
lowerCAmelCase : str = (
"Wrong input data's dimensions... "
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
try:
if dataset.shape[1] != value_array.shape[1]:
lowerCAmelCase : Optional[int] = (
"Wrong input data's shape... "
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
lowerCAmelCase : Optional[Any] = (
"Input data have different datatype... "
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
for value in value_array:
lowerCAmelCase : str = euclidean(SCREAMING_SNAKE_CASE , dataset[0] )
lowerCAmelCase : List[Any] = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowerCAmelCase : List[Any] = euclidean(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if dist > temp_dist:
lowerCAmelCase : List[Any] = temp_dist
lowerCAmelCase : List[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def a__ ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
return np.dot(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / (norm(SCREAMING_SNAKE_CASE ) * norm(SCREAMING_SNAKE_CASE ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
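A tiny usage example of the brute-force search above. The names similarity_search and euclidean come from the original module; the defs above are anonymized as a__, so treat the call below as illustrative.
import numpy as np
dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.1]])
# expected: nearest vector [1.0, 1.0] at euclidean distance ~0.1414
print(similarity_search(dataset, queries))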
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"{solution() = }")
| 681 |
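The loop above is O(n); the same sum has a closed form via the arithmetic series and inclusion-exclusion over multiples of 3, 5 and 15. A sketch (function name is mine):
def solution_closed_form(n: int = 1000) -> int:
    """Sum of multiples of 3 or 5 below n in O(1)."""
    def arithmetic_sum(k: int) -> int:
        m = (n - 1) // k  # count of positive multiples of k below n
        return k * m * (m + 1) // 2
    return arithmetic_sum(3) + arithmetic_sum(5) - arithmetic_sum(15)
assert solution_closed_form(1000) == 233168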
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
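# Split `code` into top-level chunks at `indent_level`, optionally treating
# everything before `start_prompt` (and after `end_prompt`) as separate chunks.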
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
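# Project Euler problem 20: sum of the digits of n! (n = 100 by default).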
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
import pytest
lowerCAmelCase__ = '''__dummy_dataset1__'''
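# Source code of a minimal dataset loading script; the fixtures below expose its
# name and code and write it under a temporary directory so tests can load it by path.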
lowerCAmelCase__ = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a__ ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = dataset_loading_script_name
lowerCAmelCase : Optional[int] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = script_dir / f"""{script_name}.py"""
with open(SCREAMING_SNAKE_CASE , "w" ) as f:
f.write(SCREAMING_SNAKE_CASE )
return str(SCREAMING_SNAKE_CASE )
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(None )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
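# Project Euler problem 16: sum of the digits of 2**power (power = 1000 by default).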
lowerCAmelCase : Union[str, Any] = 2**power
lowerCAmelCase : Dict = str(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = list(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = 0
for i in list_num:
sum_of_num += int(SCREAMING_SNAKE_CASE )
return sum_of_num
if __name__ == "__main__":
lowerCAmelCase__ = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCAmelCase__ = solution(power)
print('''Sum of the digits is: ''', result)
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.0 , snake_case__ = None , snake_case__ = "geglu" , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = "layer_norm" , snake_case__ = False , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Any = only_cross_attention
lowerCAmelCase : Any = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
lowerCAmelCase : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase : Optional[Any] = AdaLayerNorm(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase : Optional[int] = AdaLayerNormZero(snake_case__ , snake_case__ )
else:
lowerCAmelCase : List[str] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
lowerCAmelCase : List[str] = Attention(
query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase : Tuple = (
AdaLayerNorm(snake_case__ , snake_case__ )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
)
lowerCAmelCase : Optional[Any] = Attention(
query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = None
# 3. Feed-forward
lowerCAmelCase : Tuple = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
lowerCAmelCase : str = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ )
# let chunk size default to None
lowerCAmelCase : str = None
lowerCAmelCase : Tuple = 0
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = chunk_size
lowerCAmelCase : Optional[Any] = dim
def lowercase__ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
lowerCAmelCase : Union[str, Any] = self.norma(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.norma(
snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase : Tuple = self.norma(snake_case__ )
lowerCAmelCase : Tuple = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase : List[str] = self.attna(
snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase : List[str] = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase : int = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase : Optional[int] = (
self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ )
)
lowerCAmelCase : Optional[int] = self.attna(
snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , )
lowerCAmelCase : Dict = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase : List[Any] = self.norma(snake_case__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase : int = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
lowerCAmelCase : Tuple = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase : Optional[Any] = torch.cat(
[self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase : List[Any] = self.ff(snake_case__ )
if self.use_ada_layer_norm_zero:
lowerCAmelCase : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase : List[str] = ff_output + hidden_states
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ = None , snake_case__ = 4 , snake_case__ = 0.0 , snake_case__ = "geglu" , snake_case__ = False , ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : str = int(dim * mult )
lowerCAmelCase : Dict = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase : Tuple = GELU(snake_case__ , snake_case__ )
if activation_fn == "gelu-approximate":
lowerCAmelCase : str = GELU(snake_case__ , snake_case__ , approximate="tanh" )
elif activation_fn == "geglu":
lowerCAmelCase : Any = GEGLU(snake_case__ , snake_case__ )
elif activation_fn == "geglu-approximate":
lowerCAmelCase : Optional[int] = ApproximateGELU(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(snake_case__ )
# project dropout
self.net.append(nn.Dropout(snake_case__ ) )
# project out
self.net.append(nn.Linear(snake_case__ , snake_case__ ) )
# FF layers as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(snake_case__ ) )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
for module in self.net:
lowerCAmelCase : Any = module(snake_case__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ = "none" ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Optional[Any] = nn.Linear(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = approximate
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(snake_case__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.proj(snake_case__ )
lowerCAmelCase : List[Any] = self.gelu(snake_case__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Dict = nn.Linear(snake_case__ , dim_out * 2 )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(snake_case__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : int = self.proj(snake_case__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(snake_case__ )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Optional[Any] = nn.Linear(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.proj(snake_case__ )
return x * torch.sigmoid(1.702 * x )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : Optional[Any] = nn.Embedding(snake_case__ , snake_case__ )
lowerCAmelCase : str = nn.SiLU()
lowerCAmelCase : Dict = nn.Linear(snake_case__ , embedding_dim * 2 )
lowerCAmelCase : Optional[int] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.linear(self.silu(self.emb(snake_case__ ) ) )
lowerCAmelCase , lowerCAmelCase : List[str] = torch.chunk(snake_case__ , 2 )
lowerCAmelCase : Optional[Any] = self.norm(snake_case__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : str = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ )
lowerCAmelCase : Any = nn.SiLU()
lowerCAmelCase : List[Any] = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ )
lowerCAmelCase : Optional[Any] = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1e-6 )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Any = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[int] = emb.chunk(6 , dim=1 )
lowerCAmelCase : Dict = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = 1e-5 ):
"""simple docstring"""
super().__init__()
lowerCAmelCase : List[str] = num_groups
lowerCAmelCase : Dict = eps
if act_fn is None:
lowerCAmelCase : Union[str, Any] = None
else:
lowerCAmelCase : Tuple = get_activation(snake_case__ )
lowerCAmelCase : Optional[int] = nn.Linear(snake_case__ , out_dim * 2 )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.act:
lowerCAmelCase : List[str] = self.act(snake_case__ )
lowerCAmelCase : str = self.linear(snake_case__ )
lowerCAmelCase : Any = emb[:, :, None, None]
lowerCAmelCase , lowerCAmelCase : Optional[int] = emb.chunk(2 , dim=1 )
lowerCAmelCase : Tuple = F.group_norm(snake_case__ , self.num_groups , eps=self.eps )
lowerCAmelCase : Optional[Any] = x * (1 + scale) + shift
return x
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
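# Pack each (story, continuation-1, continuation-2, label) example into fixed-size
# arrays: token ids of shape (n_batch, 2, input_len), the position of the [clf]
# token, LM labels padded with -100 (ignored by the loss), and the choice label.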
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, contb, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , str ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , int ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, contb, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , WEIGHTS_NAME )
lowerCAmelCase : Dict = os.path.join(args.output_dir , CONFIG_NAME )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
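# The original LDM checkpoint stores all sub-models in one flat state dict; the
# two key prefixes below identify the VQ-VAE and UNet weights respectively.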
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
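# Width of the raw input fed to the transformer: one value per lag of the target
# series, plus the extra features counted by `_number_of_features` below.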
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 681 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : List[Any] =StableDiffusionLDMaDPipeline
a : int =TEXT_TO_IMAGE_PARAMS
a : Optional[int] =TEXT_TO_IMAGE_BATCH_PARAMS
a : Any =TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
torch.manual_seed(0 )
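# LDM3D's autoencoder works on 6 channels (RGB + depth) instead of the usual 3.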
lowerCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCAmelCase : List[str] = CLIPTextModel(snake_case__ )
lowerCAmelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase : Optional[int] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Any = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : List[str] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : Optional[int] = StableDiffusionLDMaDPipeline(**snake_case__ )
lowerCAmelCase : Dict = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Any = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : int = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : Tuple = output.rgb, output.depth
lowerCAmelCase : Any = rgb[0, -3:, -3:, -1]
lowerCAmelCase : Tuple = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase : Optional[Any] = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
lowerCAmelCase : str = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
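    # Note (illustrative): the dummy checks above compare a 3x3 corner of the
    # RGB output and the last depth column against reference values recorded
    # for the fixed seed, so numerical drift in the pipeline surfaces as a
    # slice mismatch rather than a silent quality regression.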
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.get_dummy_components()
lowerCAmelCase : Any = StableDiffusionLDMaDPipeline(**snake_case__ )
lowerCAmelCase : int = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : List[str] = 3 * [inputs["prompt"]]
# forward
lowerCAmelCase : List[Any] = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = output.rgb, output.depth
lowerCAmelCase : Tuple = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase : Any = depth_slice_a[0, -3:, -1]
lowerCAmelCase : Tuple = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : int = 3 * [inputs.pop("prompt" )]
lowerCAmelCase : int = ldmad_pipe.tokenizer(
snake_case__ , padding="max_length" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , )
lowerCAmelCase : List[Any] = text_inputs["input_ids"].to(snake_case__ )
lowerCAmelCase : Tuple = ldmad_pipe.text_encoder(snake_case__ )[0]
lowerCAmelCase : List[str] = prompt_embeds
# forward
lowerCAmelCase : str = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : List[str] = output.rgb, output.depth
lowerCAmelCase : List[str] = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase : List[str] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[str] = self.get_dummy_components()
lowerCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=snake_case__ )
lowerCAmelCase : List[Any] = StableDiffusionLDMaDPipeline(**snake_case__ )
lowerCAmelCase : Any = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : int = "french fries"
lowerCAmelCase : Tuple = ldmad_pipe(**snake_case__ , negative_prompt=snake_case__ )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = output.rgb, output.depth
lowerCAmelCase : str = rgb[0, -3:, -3:, -1]
lowerCAmelCase : List[str] = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase : Any = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
lowerCAmelCase : List[Any] = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : str = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Optional[Any] = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : Union[str, Any] = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
lowerCAmelCase : Tuple = ldmad_pipe.to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : List[Any] = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : Optional[int] = output.rgb, output.depth
lowerCAmelCase : List[Any] = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase : Tuple = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCAmelCase : Union[str, Any] = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
lowerCAmelCase : Tuple = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self , snake_case__ , snake_case__="cpu" , snake_case__=torch.floataa , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : List[str] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Union[str, Any] = np.random.RandomState(snake_case__ ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase : List[Any] = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
lowerCAmelCase : str = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : int = self.get_inputs(snake_case__ )
lowerCAmelCase : str = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : Optional[int] = output.rgb, output.depth
lowerCAmelCase : Any = 0.495586
lowerCAmelCase : int = 0.33795515
lowerCAmelCase : Optional[int] = 112.48518
lowerCAmelCase : int = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(snake_case__ )
ldmad_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[Any] = self.get_inputs(snake_case__ )
lowerCAmelCase : Tuple = ldmad_pipe(**snake_case__ )
lowerCAmelCase , lowerCAmelCase : int = output.rgb, output.depth
lowerCAmelCase : Tuple = 0.4194127
lowerCAmelCase : Dict = 0.35375586
lowerCAmelCase : Union[str, Any] = 0.5638502
lowerCAmelCase : Union[str, Any] = 0.34686103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[str] ="wav2vec2"
def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=0 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="sum" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1_500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=3 , snake_case__=2 , snake_case__=3 , snake_case__=None , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : List[Any] = feat_extract_norm
lowerCAmelCase : Optional[int] = feat_extract_activation
lowerCAmelCase : int = list(snake_case__ )
lowerCAmelCase : int = list(snake_case__ )
lowerCAmelCase : Optional[int] = list(snake_case__ )
lowerCAmelCase : Any = conv_bias
lowerCAmelCase : Tuple = num_conv_pos_embeddings
lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
lowerCAmelCase : str = len(self.conv_dim )
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : str = intermediate_size
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Any = hidden_dropout
lowerCAmelCase : Union[str, Any] = attention_dropout
lowerCAmelCase : Tuple = activation_dropout
lowerCAmelCase : Tuple = feat_proj_dropout
lowerCAmelCase : int = final_dropout
lowerCAmelCase : int = layerdrop
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Tuple = initializer_range
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : List[str] = do_stable_layer_norm
lowerCAmelCase : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase : Any = apply_spec_augment
lowerCAmelCase : Any = mask_time_prob
lowerCAmelCase : List[Any] = mask_time_length
lowerCAmelCase : Optional[Any] = mask_time_min_masks
lowerCAmelCase : Optional[Any] = mask_feature_prob
lowerCAmelCase : Tuple = mask_feature_length
lowerCAmelCase : Tuple = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase : Any = num_codevectors_per_group
lowerCAmelCase : Optional[int] = num_codevector_groups
lowerCAmelCase : Any = contrastive_logits_temperature
lowerCAmelCase : Optional[Any] = feat_quantizer_dropout
lowerCAmelCase : Tuple = num_negatives
lowerCAmelCase : Tuple = codevector_dim
lowerCAmelCase : int = proj_codevector_dim
lowerCAmelCase : Optional[int] = diversity_loss_weight
# ctc loss
lowerCAmelCase : Optional[int] = ctc_loss_reduction
lowerCAmelCase : Tuple = ctc_zero_infinity
# adapter
lowerCAmelCase : List[Any] = add_adapter
lowerCAmelCase : str = adapter_kernel_size
lowerCAmelCase : Optional[int] = adapter_stride
lowerCAmelCase : Dict = num_adapter_layers
lowerCAmelCase : Any = output_hidden_size or hidden_size
lowerCAmelCase : Dict = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase : str = list(snake_case__ )
lowerCAmelCase : Union[str, Any] = list(snake_case__ )
lowerCAmelCase : int = list(snake_case__ )
lowerCAmelCase : Tuple = xvector_output_dim
@property
def lowercase__ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
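# Quick sanity sketch (illustrative, not part of the original file; reuses the
# functools/operator imports above): the property multiplies the conv strides,
# so the default stack maps 320 raw audio samples to one frame of logits.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320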
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =StableDiffusionPanoramaPipeline
a : str =TEXT_TO_IMAGE_PARAMS
a : str =TEXT_TO_IMAGE_BATCH_PARAMS
a : Tuple =TEXT_TO_IMAGE_IMAGE_PARAMS
a : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
lowerCAmelCase : Any = DDIMScheduler()
torch.manual_seed(0 )
lowerCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowerCAmelCase : Any = CLIPTextModel(snake_case__ )
lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : Any = torch.manual_seed(snake_case__ )
lowerCAmelCase : Optional[int] = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Any = self.get_dummy_components()
lowerCAmelCase : Any = StableDiffusionPanoramaPipeline(**snake_case__ )
lowerCAmelCase : Optional[int] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : List[Any] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Union[str, Any] = sd_pipe(**snake_case__ ).images
lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : Union[str, Any] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[int] = self.get_dummy_components()
lowerCAmelCase : List[str] = StableDiffusionPanoramaPipeline(**snake_case__ )
lowerCAmelCase : Optional[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Dict = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = "french fries"
lowerCAmelCase : List[str] = sd_pipe(**snake_case__ , negative_prompt=snake_case__ )
lowerCAmelCase : int = output.images
lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : List[str] = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : str = self.get_dummy_components()
lowerCAmelCase : List[str] = StableDiffusionPanoramaPipeline(**snake_case__ )
lowerCAmelCase : Any = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Optional[int] = sd_pipe(**snake_case__ , view_batch_size=2 )
lowerCAmelCase : Optional[int] = output.images
lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
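    # Note (illustrative): panorama generation denoises overlapping latent
    # windows ("views") and blends them back together; view_batch_size only
    # changes how many windows are processed per step, which is why the
    # expected slice matches the default run above up to rounding.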
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : List[Any] = self.get_dummy_components()
lowerCAmelCase : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" )
lowerCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline(**snake_case__ )
lowerCAmelCase : Dict = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : str = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : List[Any] = sd_pipe(**snake_case__ ).images
lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : List[Any] = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase : Optional[int] = self.get_dummy_components()
lowerCAmelCase : Dict = PNDMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=snake_case__ )
lowerCAmelCase : List[str] = StableDiffusionPanoramaPipeline(**snake_case__ )
lowerCAmelCase : Optional[Any] = sd_pipe.to(snake_case__ )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
lowerCAmelCase : Optional[int] = self.get_dummy_inputs(snake_case__ )
lowerCAmelCase : Any = sd_pipe(**snake_case__ ).images
lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self , snake_case__=0 ):
"""simple docstring"""
lowerCAmelCase : List[Any] = torch.manual_seed(snake_case__ )
lowerCAmelCase : Tuple = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
lowerCAmelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : Dict = self.get_inputs()
lowerCAmelCase : Any = pipe(**snake_case__ ).images
lowerCAmelCase : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCAmelCase : Optional[int] = np.array(
[
0.36968392,
0.27025372,
0.32446766,
0.28379387,
0.36363274,
0.30733347,
0.27100027,
0.27054125,
0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=snake_case__ )
lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : Tuple = self.get_inputs()
lowerCAmelCase : int = pipe(**snake_case__ ).images
lowerCAmelCase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2_048, 3)
lowerCAmelCase : List[str] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = 0
def callback_fn(snake_case__ , snake_case__ , snake_case__ ) -> None:
lowerCAmelCase : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCAmelCase : str = latents[0, -3:, -3:, -1]
lowerCAmelCase : Tuple = np.array(
[
0.18681869,
0.33907816,
0.5361276,
0.14432865,
-0.02856611,
-0.73941123,
0.23397987,
0.47322682,
-0.37823164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCAmelCase : Any = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowerCAmelCase : str = latents[0, -3:, -3:, -1]
lowerCAmelCase : str = np.array(
[
0.18539645,
0.33987248,
0.5378559,
0.14437142,
-0.02455261,
-0.7338317,
0.23990755,
0.47356272,
-0.3786505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCAmelCase : Any = False
lowerCAmelCase : str = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase : Optional[Any] = DDIMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
lowerCAmelCase : str = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ )
lowerCAmelCase : Dict = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing()
lowerCAmelCase : str = self.get_inputs()
pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase : int = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase : Tuple = DDIMScheduler.from_pretrained(snake_case__ , subfolder="scheduler" )
lowerCAmelCase : int = StableDiffusionPanoramaPipeline.from_pretrained(snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ )
lowerCAmelCase : Tuple = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase : int = self.get_inputs()
lowerCAmelCase : int = pipe(**snake_case__ )
lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
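# Minimal self-contained sketch of the window-matching idea used above
# (illustrative helper names, not part of the original module; reuses the
# `re` import from the top of the file):
def _window_match(patterns, path):
    compiled = tuple(re.compile(p + "$") for p in patterns)
    for i in range(len(path) - len(patterns) + 1):
        window = [c.match(k) for c, k in zip(compiled, path[i:])]
        if window and all(window):
            return True
    return False

assert _window_match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
assert not _window_match(("ln_f", "bias"), ("attention", "out_proj", "bias"))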
| 681 | 1 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
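# Worked check (illustrative): 16807 = 7**5 has exactly five digits, so it is
# one of the powers counted above; bases of 10 or more can never qualify,
# since 10**n already has n + 1 digits.
assert len(str(7**5)) == 5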
| 681 | 1 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def a__ ( SCREAMING_SNAKE_CASE : int = 3 ):
'''simple docstring'''
    if isinstance(SCREAMING_SNAKE_CASE , str ):
        raise TypeError("number of qubits must be an integer." )
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0." )
    if math.floor(SCREAMING_SNAKE_CASE ) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 1_0:
        raise ValueError("number of qubits too large to simulate (>10)." )
lowerCAmelCase : str = QuantumRegister(SCREAMING_SNAKE_CASE , "qr" )
lowerCAmelCase : Any = ClassicalRegister(SCREAMING_SNAKE_CASE , "cr" )
lowerCAmelCase : int = QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# simulate with 10000 shots
lowerCAmelCase : Optional[int] = Aer.get_backend("qasm_simulator" )
lowerCAmelCase : Optional[Any] = execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_0_0_0_0 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
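# Expected outcome (sketch): the register starts in |000>, and the QFT of the
# all-zero state is a uniform superposition, so the 10_000 shots should split
# roughly evenly across all eight bitstrings (about 1_250 counts each).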
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
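# Self-contained trace of the same flip strategy (illustrative names, written
# without the obfuscated aliases above): the largest remaining value is
# flipped to the front, then flipped again to the end of the unsorted prefix.
def _pancake_sort_sketch(values):
    data = list(values)
    for cur in range(len(data), 1, -1):
        mi = data.index(max(data[:cur]))
        data = data[mi::-1] + data[mi + 1 :]     # max of the prefix to the front
        data = data[cur - 1 :: -1] + data[cur:]  # flip it into position cur - 1
    return data

assert _pancake_sort_sketch([3, 1, 2]) == [1, 2, 3]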
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
lowerCAmelCase__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
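# Self-contained sketch of the binary-search variant above (illustrative
# names): integer arithmetic avoids the float rounding that can make the
# sqrt-based check misreport very large squares.
def _is_perfect_square_sketch(n: int) -> bool:
    left, right = 0, n
    while left <= right:
        mid = (left + right) // 2
        if mid * mid == n:
            return True
        if mid * mid > n:
            right = mid - 1
        else:
            left = mid + 1
    return False

assert _is_perfect_square_sketch(16) and not _is_perfect_square_sketch(15)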
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''BlipImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
| 681 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ConvNextFeatureExtractor''']
lowerCAmelCase__ = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
"decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Dict = emb.weight.shape
lowerCAmelCase : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = emb.weight.data
return lin_layer
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
lowerCAmelCase : Dict = Namespace(**checkpoint["cfg"]["model"] )
lowerCAmelCase : Tuple = checkpoint["model"]
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = state_dict["decoder.embed_tokens.weight"].shape[0]
lowerCAmelCase : Optional[int] = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
lowerCAmelCase : Optional[int] = XGLMConfig(
vocab_size=SCREAMING_SNAKE_CASE , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowerCAmelCase : str = XGLMForCausalLM(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
print(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
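# Example invocation (illustrative file names and paths, not taken from the
# original script):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-hf
# The converted folder can then be loaded with XGLMForCausalLM.from_pretrained.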
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
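    # Note (illustrative): the timm and transformers backbones are expected to
    # expose the same out_features/out_indices/channels bookkeeping, so the
    # checks above compare backbone metadata rather than forward outputs.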
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def test_model_weights_reload_when_same_dtype(self):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
    def test_channels(self):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
    def test_torchscript_output_attentions(self):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
    def test_can_use_safetensors(self):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self):
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 681 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta-prelayernorm"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
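# A minimal usage sketch (added for illustration; not part of the original
# file): build a default config and inspect the ONNX input axes.
#
#     config = RobertaPreLayerNormConfig()
#     onnx_config = RobertaPreLayerNormOnnxConfig(config)
#     print(onnx_config.inputs)  # input_ids / attention_mask with dynamic batch and sequence axes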
| 681 |
"""simple docstring"""
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    '''simple docstring'''
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    '''simple docstring'''
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    '''simple docstring'''

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    '''simple docstring'''

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
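# Small illustration (added, not in the original script): with the identity key,
# sort_objects(["zeta_fn", "BetaModel", "ALPHA_MAP"]) returns
# ["ALPHA_MAP", "BetaModel", "zeta_fn"]: constants, then classes, then functions.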
def sort_objects_in_import(import_statement):
    '''simple docstring'''

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
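# Illustration (added, not in the original script): a bracketed import list such as
#     "albert": ["AlbertConfig", "ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"]
# is rewritten with the constant first:
#     "albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"]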
def sort_imports(file, check_only=True):
    '''simple docstring'''
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    '''simple docstring'''
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures += [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        """simple docstring"""

        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30_522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 681 | 1 |
"""simple docstring"""
def gray_code_sequence(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
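
    # A minimal usage check (added; not part of the original module): for two
    # bits the reflected Gray code visits 00, 01, 11, 10, i.e. the integers
    # [0, 1, 3, 2], so consecutive entries differ in exactly one bit.
    assert gray_code_sequence(2) == [0, 1, 3, 2]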
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1_000):
    '''simple docstring'''
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp)
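    # Worked example (added): for n = 13, n - 1 = 12 = 3 * 2**2, so d = 3 and
    # exp = 2; each random base a is then tested via a**3, a**6, a**12 (mod 13).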
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels)

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
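    # Note (added): PyTorch initializes freshly constructed BatchNorm2d and
    # GroupNorm layers with weight = 1 and bias = 0, which is exactly what the
    # assertions above check for every normalization module in the model.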
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
@register_to_config
    def __init__(self, num_train_timesteps: int = 1_000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
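    # Note (added): the 1e-20 clip in the "fixed_small" branches guards the
    # downstream sqrt/log against t = 0, where alpha_prod_t_prev == 1 makes the
    # raw posterior variance exactly zero.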
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key=None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or"
                " `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
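
# A minimal usage sketch (added for illustration; `model_output` and `sample`
# are placeholders you would obtain from a diffusion model and the current latents):
#
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50)
#     for t in state.timesteps:
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)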
| 681 | 1 |
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return float((preds == labels).mean() )
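# (preds == labels).mean() relies on numpy element-wise comparison; the Metric
# class below declares format="numpy" for these subsets, so plain Python lists
# never reach this helper.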
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]="binary" ):
'''simple docstring'''
lowerCAmelCase : Dict = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = {}
for id_pred, label in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = f"""{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}"""
lowerCAmelCase : str = id_pred["prediction"]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase : str = [(pred, label)]
lowerCAmelCase , lowerCAmelCase : List[str] = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase , lowerCAmelCase : str = zip(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average="macro" )
fas.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE ) )
ems.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = float(sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : Optional[int] = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
def lowercase__ ( self ):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(snake_case__ , snake_case__ )}
elif self.config_name == "cb":
return acc_and_fa(snake_case__ , snake_case__ , fa_avg="macro" )
elif self.config_name == "record":
lowerCAmelCase : List[str] = [
{
"qas": [
{"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
for ref in references
]
}
]
lowerCAmelCase : Tuple = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
return evaluate_record(snake_case__ , snake_case__ )[0]
elif self.config_name == "multirc":
return evaluate_multirc(snake_case__ , snake_case__ )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
lowerCAmelCase__ = {'''mobilebert-uncased''': 512}
lowerCAmelCase__ = {}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Any =VOCAB_FILES_NAMES
a : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
a : List[Any] =PRETRAINED_INIT_CONFIGURATION
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] =MobileBertTokenizer
def __init__( self , snake_case__=None , snake_case__=None , snake_case__=True , snake_case__="[UNK]" , snake_case__="[SEP]" , snake_case__="[PAD]" , snake_case__="[CLS]" , snake_case__="[MASK]" , snake_case__=True , snake_case__=None , **snake_case__ , ):
"""simple docstring"""
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
lowerCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case
or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars
):
lowerCAmelCase : List[str] = getattr(snake_case__ , normalizer_state.pop("type" ) )
lowerCAmelCase : Any = do_lower_case
lowerCAmelCase : int = strip_accents
lowerCAmelCase : Union[str, Any] = tokenize_chinese_chars
lowerCAmelCase : Union[str, Any] = normalizer_class(**snake_case__ )
lowerCAmelCase : str = do_lower_case
def lowercase__ ( self , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : List[Any] = [self.sep_token_id]
lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
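# e.g. for sequences A = [a1, a2] and B = [b1], "[CLS] a1 a2 [SEP] b1 [SEP]"
# yields token type ids [0, 0, 0, 0, 1, 1]: zeros for the first segment
# (including [CLS] and its [SEP]), ones for the second segment and its [SEP].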
def lowercase__ ( self , snake_case__ , snake_case__ = None ):
"""simple docstring"""
lowerCAmelCase : Tuple = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
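# A hedged alternative sketch (the helper below is mine, not part of the source):
# the same sum in O(1) via inclusion-exclusion, since the multiples of k below n
# sum to k * m * (m + 1) / 2 with m = (n - 1) // k.
def solution_closed_form(n: int = 1_0_0_0) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)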
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : List[str] = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=SCREAMING_SNAKE_CASE , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=SCREAMING_SNAKE_CASE , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=SCREAMING_SNAKE_CASE , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=SCREAMING_SNAKE_CASE , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=SCREAMING_SNAKE_CASE , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=SCREAMING_SNAKE_CASE , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if args.calibrator == "max":
lowerCAmelCase : List[Any] = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
lowerCAmelCase : Any = "histogram"
elif args.calibrator == "mse":
lowerCAmelCase : Dict = "histogram"
else:
raise ValueError(f"""Invalid calibrator {args.calibrator}""" )
lowerCAmelCase : str = QuantDescriptor(num_bits=args.aprec , calib_method=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
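# axis=(0,) scales weights per output channel (one amax per row), while
# --quant-per-tensor collapses this to axis=None, a single scale per tensor.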
quant_nn.QuantLinear.set_default_quant_desc_input(SCREAMING_SNAKE_CASE )
quant_nn.QuantLinear.set_default_quant_desc_weight(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=False , SCREAMING_SNAKE_CASE : Optional[int]=False ):
'''simple docstring'''
logger.info("Configuring Model for Quantization" )
logger.info(f"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , ["embeddings"] , which="weight" , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [""] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_keyword:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , args.quant_disable_keyword , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_disable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.quant_enable_layer_module:
set_quantizer_by_name(SCREAMING_SNAKE_CASE , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=SCREAMING_SNAKE_CASE )
if args.recalibrate_weights:
recalibrate_weights(SCREAMING_SNAKE_CASE )
if args.fuse_qkv:
fuse_qkv(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if args.clip_gelu:
clip_gelu(SCREAMING_SNAKE_CASE , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f"""{name:80}: {module}""" )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
def fusea(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
for mod in [qq, qk, qv]:
if not hasattr(SCREAMING_SNAKE_CASE , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
lowerCAmelCase : List[Any] = qq._amax.detach().item()
lowerCAmelCase : List[Any] = qk._amax.detach().item()
lowerCAmelCase : List[Any] = qv._amax.detach().item()
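# Sharing the largest of the three amaxes gives q, k and v one common
# quantization scale, which is what allows fusing the QKV projections later.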
lowerCAmelCase : Optional[int] = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
qq._amax.fill_(SCREAMING_SNAKE_CASE )
qk._amax.fill_(SCREAMING_SNAKE_CASE )
qv._amax.fill_(SCREAMING_SNAKE_CASE )
logger.info(f""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(f"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
lowerCAmelCase : Tuple = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
lowerCAmelCase : List[str] = mod.weight.shape[0]
lowerCAmelCase : Any = mod._weight_quantizer._amax.detach()
lowerCAmelCase : Tuple = torch.ones(SCREAMING_SNAKE_CASE , dtype=amax.dtype , device=amax.device ) * amax
print(f"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , "_weight_quantizer" ):
if not hasattr(mod.weight_quantizer , "_amax" ):
print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
lowerCAmelCase : List[Any] = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
lowerCAmelCase : List[Any] = set(range(len(mod.weight.size() ) ) ) - axis_set
lowerCAmelCase : Dict = pytorch_quantization.utils.reduce_amax(mod.weight , axis=SCREAMING_SNAKE_CASE , keepdims=SCREAMING_SNAKE_CASE ).detach()
logger.info(f"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
lowerCAmelCase : Tuple = amax
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any]=2_5 , SCREAMING_SNAKE_CASE : Optional[int]=1_8_0 , SCREAMING_SNAKE_CASE : str=None ):
'''simple docstring'''
if ignore is None:
lowerCAmelCase : List[str] = []
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Any = [ignore]
lowerCAmelCase : List[Any] = 0
for name, mod in model.named_modules():
if not hasattr(SCREAMING_SNAKE_CASE , "weight" ):
continue
lowerCAmelCase : Optional[int] = max(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
for name, mod in model.named_modules():
lowerCAmelCase : Dict = getattr(SCREAMING_SNAKE_CASE , "_input_quantizer" , SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = getattr(SCREAMING_SNAKE_CASE , "_weight_quantizer" , SCREAMING_SNAKE_CASE )
if not hasattr(SCREAMING_SNAKE_CASE , "weight" ):
continue
if type(SCREAMING_SNAKE_CASE ) in ignore:
continue
if [True for s in ignore if type(SCREAMING_SNAKE_CASE ) is str and s in name]:
continue
lowerCAmelCase : Any = f"""Act:{input_q.extra_repr()}"""
lowerCAmelCase : str = f"""Wgt:{weight_q.extra_repr()}"""
lowerCAmelCase : Optional[int] = f"""{name:{name_width}} {act_str} {wgt_str}"""
if len(SCREAMING_SNAKE_CASE ) <= line_width:
logger.info(SCREAMING_SNAKE_CASE )
else:
logger.info(f"""{name:{name_width}} {act_str}""" )
logger.info(f"""{" ":{name_width}} {wgt_str}""" )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
for name, mod in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , pytorch_quantization.nn.TensorQuantizer ):
print(f"""{name:80} {mod}""" )
count += 1
print(f"""{count} TensorQuantizers found in model""" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if quantizer_mod is not None:
assert hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
logger.warning(f"""{name} has no {quantizer}""" )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[int]="both" , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : List[str] = f"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "_input_quantizer" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if which in ["weight", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "_weight_quantizer" , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(SCREAMING_SNAKE_CASE , "_input_quantizer" ) or hasattr(SCREAMING_SNAKE_CASE , "_weight_quantizer" ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
set_quantizers(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = f"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += f""" {k}={v}"""
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
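# "partitions" is the number of model-parallel slices, "microbatches" the
# pipeline chunks per batch, and "ddp" adds data parallelism on top.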
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
lowerCAmelCase__ = tuple[float, float, float]
lowerCAmelCase__ = tuple[float, float, float]
def a__ ( SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad ):
'''simple docstring'''
lowerCAmelCase : int = end_pointa[0] - end_pointa[0]
lowerCAmelCase : Optional[int] = end_pointa[1] - end_pointa[1]
lowerCAmelCase : List[Any] = end_pointa[2] - end_pointa[2]
return (x, y, z)
def a__ ( SCREAMING_SNAKE_CASE : Vectorad , SCREAMING_SNAKE_CASE : Vectorad ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowerCAmelCase : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowerCAmelCase : int = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def a__ ( SCREAMING_SNAKE_CASE : Vectorad , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return tuple(round(x , SCREAMING_SNAKE_CASE ) for x in vector ) == (0, 0, 0)
def a__ ( SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : Pointad , SCREAMING_SNAKE_CASE : int = 1_0 ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = create_vector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = create_vector(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return is_zero_vector(get_ad_vectors_cross(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
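# Worked check (illustrative points): with A=(0, 0, 0), B=(1, 1, 1) and
# C=(2, 2, 2), AB=(1, 1, 1) and AC=(2, 2, 2); their cross product is
# (1*2 - 1*2, -(1*2 - 1*2), 1*2 - 1*2) = (0, 0, 0), so the points are collinear.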
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(x ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
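# Worked check: factorial(10) = 3_628_800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so the digit sum of 10! is 27.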
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[List[PIL.Image.Image], np.ndarray]
a : Optional[List[bool]]
a : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
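# e.g. insert_nth(1, 9) on 1->2->3 stops temp at node 1, points the new node
# at node 2 and node 1 at the new node, yielding 1->9->2->3.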
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
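# e.g. reversing 1->2->3: after the loop every next pointer is flipped and
# prev rests on node 3, so the head becomes 3 and the list reads 3->2->1.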
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
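# the // 32 comes from ResNet's stride-2 stem plus pooling (a factor of 4)
# followed by three stride-2 stages (a factor of 8), i.e. 4 * 8 = 32 overall.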
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
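# _LazyModule defers the heavy torch/TF imports until an attribute is first
# accessed; the TYPE_CHECKING branch above exists only for static analyzers.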
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowerCAmelCase : Optional[int] = number_of_bytes // partitions
lowerCAmelCase : List[Any] = []
for i in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = i * bytes_per_partition + 1
lowerCAmelCase : str = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
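# Worked example (assuming the obfuscated function above keeps its behavior):
# splitting 100 bytes into 4 partitions yields ["1-25", "26-50", "51-75", "76-100"],
# with the last partition absorbing any remainder.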
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
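# -100 marks positions the language-modeling loss should ignore
# (the default ignore_index of PyTorch's CrossEntropyLoss).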
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
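# Each encoded example is laid out as [_start_] story [_delimiter_] continuation [_classify_],
# so the cap of n_positions // 2 - 2 above keeps story + longest continuation + 3 special
# tokens within the model's position-embedding budget.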
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
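# Biases and LayerNorm parameters (the `no_decay` group above) are exempt from weight decay,
# following the standard transformer fine-tuning recipe.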
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
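# exponential moving average of the loss (decay 0.7), used only for the progress-bar text below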
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
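# Hypothetical invocation (script and data-file names are illustrative, not taken from the source):
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset cloze_val.csv --eval_dataset cloze_test.csv --output_dir ./out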
| 681 | 1 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Make prev the new head: the list is now reversed in place
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
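# Minimal non-interactive sketch of the API exercised above:
#   ll = LinkedList(); ll.insert_tail(1); ll.insert_tail(2); ll.reverse()
#   assert str(ll) == "2->1" and len(ll) == 2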
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
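# encoder/decoder input width: one value per lag in `lags_sequence` plus the covariate
# features counted by `_number_of_features` (see the property at the end of this class)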
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 681 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =IFPipeline
a : Optional[Any] =TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
a : List[str] =TEXT_TO_IMAGE_BATCH_PARAMS
a : Optional[Any] =PipelineTesterMixin.required_optional_params - {"latents"}
def lowercase__ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def lowercase__ ( self , snake_case__ , snake_case__=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
lowerCAmelCase : Any = torch.manual_seed(snake_case__ )
else:
lowerCAmelCase : Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
lowerCAmelCase : Dict = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowercase__ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowercase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
lowerCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ )
# pre-compute the text embeddings and remove the T5 text encoder to save memory
pipe_a.text_encoder.to("cuda" )
lowerCAmelCase , lowerCAmelCase : str = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
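# dropping the T5 text encoder here frees several GB of GPU memory before the UNet stages run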
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
lowerCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
lowerCAmelCase : Dict = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowerCAmelCase : Tuple = IFInpaintingPipeline(**pipe_a.components )
lowerCAmelCase : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_start_torch_memory_measurement()
lowerCAmelCase : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : Optional[int] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowerCAmelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Any = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase : str = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_start_torch_memory_measurement()
lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : Optional[Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Tuple = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase : Dict = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_start_torch_memory_measurement()
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case__ )
lowerCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : Optional[Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , )
lowerCAmelCase : str = output.images[0]
assert image.shape == (64, 64, 3)
lowerCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowerCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
# pipeline 2
_start_torch_memory_measurement()
lowerCAmelCase : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case__ )
lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case__ )
lowerCAmelCase : Optional[Any] = pipe_a(
prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , )
lowerCAmelCase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
lowerCAmelCase : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowerCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case__ , snake_case__ )
def a__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
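# digit-reversal palindrome check: e.g. 121 reverses to 121 (True), 10 reverses to 1 (False),
# and negative inputs return False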
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : str =GPTaTokenizer
a : Optional[Any] =GPTaTokenizerFast
a : List[str] =True
a : List[Any] ={"add_prefix_space": True}
a : int =False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase : List[Any] = {"unk_token": "<unk>"}
lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(snake_case__ ) )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = "lower newer"
lowerCAmelCase : Dict = "lower newer"
return input_text, output_text
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase : str = "lower newer"
lowerCAmelCase : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase : Any = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
lowerCAmelCase : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCAmelCase : List[str] = self.get_tokenizer()
lowerCAmelCase : List[str] = self.get_rust_tokenizer(add_prefix_space=snake_case__ )
lowerCAmelCase : Optional[Any] = "lower newer"
# Testing tokenization
lowerCAmelCase : Any = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
# Testing conversion to ids without special tokens
lowerCAmelCase : List[str] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Tuple = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
# Testing conversion to ids with special tokens
lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=snake_case__ )
lowerCAmelCase : Union[str, Any] = tokenizer.encode(snake_case__ , add_prefix_space=snake_case__ )
lowerCAmelCase : Tuple = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
# Testing the unknown token
lowerCAmelCase : int = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ )
# Simple input
lowerCAmelCase : Tuple = "This is a simple input"
lowerCAmelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase : Dict = ("This is a simple input", "This is a pair")
lowerCAmelCase : Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Simple input
self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Simple input
self.assertRaises(
snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" , )
# Pair input
self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Pair input
self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" )
# Pair input
self.assertRaises(
snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding="max_length" , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowerCAmelCase : str = "This is a simple input"
lowerCAmelCase : int = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase : Tuple = ("This is a simple input", "This is a pair")
lowerCAmelCase : Tuple = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase : List[Any] = tokenizer.pad_token_id
lowerCAmelCase : List[Any] = tokenizer(snake_case__ , padding="max_length" , max_length=30 , return_tensors="np" )
lowerCAmelCase : List[str] = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors="np" )
lowerCAmelCase : Optional[int] = tokenizer(*snake_case__ , padding="max_length" , max_length=60 , return_tensors="np" )
lowerCAmelCase : int = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "$$$"
lowerCAmelCase : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case__ , add_bos_token=snake_case__ )
lowerCAmelCase : Optional[Any] = "This is a simple input"
lowerCAmelCase : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase : Dict = tokenizer.bos_token_id
lowerCAmelCase : int = tokenizer(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer(snake_case__ )
self.assertEqual(out_s.input_ids[0] , snake_case__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase : Optional[int] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , snake_case__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = [self.get_tokenizer(do_lower_case=snake_case__ , add_bos_token=snake_case__ )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase : Dict = "Encode this."
lowerCAmelCase : Dict = "This one too please."
lowerCAmelCase : Tuple = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
encoded_sequence += tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
lowerCAmelCase : str = tokenizer.encode_plus(
snake_case__ , snake_case__ , add_special_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , )
lowerCAmelCase : Optional[Any] = encoded_sequence_dict["input_ids"]
lowerCAmelCase : Dict = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
lowerCAmelCase : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(snake_case__ )
]
lowerCAmelCase : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(snake_case__ , snake_case__ )
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=snake_case__ )
lowerCAmelCase : Dict = "A photo of a cat"
lowerCAmelCase : List[str] = tokenizer.encode(
snake_case__ , )
self.assertEqual(snake_case__ , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained("./test_opt" )
lowerCAmelCase : Union[str, Any] = tokenizer.encode(
snake_case__ , )
self.assertEqual(snake_case__ , [2, 250, 1_345, 9, 10, 4_758] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=snake_case__ )
lowerCAmelCase : List[str] = "A photo of a cat"
lowerCAmelCase : List[str] = tokenizer.encode(
snake_case__ , )
# Same as above
self.assertEqual(snake_case__ , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=snake_case__ )
lowerCAmelCase : Optional[Any] = "bos"
lowerCAmelCase : Dict = tokenizer.get_vocab()["bos"]
lowerCAmelCase : Optional[int] = "A photo of a cat"
lowerCAmelCase : Dict = tokenizer.encode(
snake_case__ , )
# We changed the bos token
self.assertEqual(snake_case__ , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowerCAmelCase : List[str] = tokenizer.encode(
snake_case__ , )
self.assertEqual(snake_case__ , [31_957, 250, 1_345, 9, 10, 4_758] )
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
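# check_copies enforces that blocks annotated "# Copied from ..." stay identical to their
# source (modulo declared renames such as DDPM->Test), as exercised in the tests below.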
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
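# retain_grad() keeps gradients on these non-leaf tensors so the grad asserts below can inspect them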
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check that the output of the last stage is taken when out_features=None and out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
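# e.g. _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel")) is True:
# the anchored patterns match the window ks[3:6]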
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
# attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
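# In each rule, P(...) names the mesh axis each parameter dimension is sharded over
# ("mp" = model parallel); a rule value of None leaves that parameter fully replicated.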
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
| 681 | 1 |
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=0.2 , snake_case__=0.2 ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : Any = bp_numa
lowerCAmelCase : Optional[Any] = bp_numa
lowerCAmelCase : Tuple = conva_get[:2]
lowerCAmelCase : Dict = conva_get[2]
lowerCAmelCase : Optional[int] = size_pa
lowerCAmelCase : List[str] = rate_w
lowerCAmelCase : List[Any] = rate_t
lowerCAmelCase : Any = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCAmelCase : Optional[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCAmelCase : List[Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
lowerCAmelCase : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(snake_case__ , "wb" ) as f:
pickle.dump(snake_case__ , snake_case__ )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls , snake_case__ ):
"""simple docstring"""
with open(snake_case__ , "rb" ) as f:
lowerCAmelCase : Union[str, Any] = pickle.load(snake_case__ ) # noqa: S301
lowerCAmelCase : Optional[Any] = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowerCAmelCase : Any = model_dic.get("size_pooling1" )
lowerCAmelCase : List[Any] = model_dic.get("num_bp1" )
lowerCAmelCase : Optional[int] = model_dic.get("num_bp2" )
lowerCAmelCase : Optional[Any] = model_dic.get("num_bp3" )
lowerCAmelCase : Tuple = model_dic.get("rate_weight" )
lowerCAmelCase : Any = model_dic.get("rate_thre" )
# create model instance
lowerCAmelCase : Any = CNN(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# modify model parameter
lowerCAmelCase : Any = model_dic.get("w_conv1" )
lowerCAmelCase : Tuple = model_dic.get("wkj" )
lowerCAmelCase : List[Any] = model_dic.get("vji" )
lowerCAmelCase : int = model_dic.get("thre_conv1" )
lowerCAmelCase : List[Any] = model_dic.get("thre_bp2" )
lowerCAmelCase : Union[str, Any] = model_dic.get("thre_bp3" )
return conv_ins
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return round(snake_case__ , 3 )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = convs[0]
lowerCAmelCase : int = convs[1]
lowerCAmelCase : Optional[Any] = np.shape(snake_case__ )[0]
# get the data slice of original image data, data_focus
lowerCAmelCase : Any = []
for i_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
for j_focus in range(0 , size_data - size_conv + 1 , snake_case__ ):
lowerCAmelCase : List[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(snake_case__ )
        # calculate the feature map of every single kernel, and save it as a list of matrices
lowerCAmelCase : Dict = []
lowerCAmelCase : Dict = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(snake_case__ ):
lowerCAmelCase : List[str] = []
for i_focus in range(len(snake_case__ ) ):
lowerCAmelCase : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(snake_case__ ) )
lowerCAmelCase : int = np.asmatrix(snake_case__ ).reshape(
snake_case__ , snake_case__ )
data_featuremap.append(snake_case__ )
        # expand the data slice to one dimension
lowerCAmelCase : str = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(snake_case__ ) )
lowerCAmelCase : str = np.asarray(snake_case__ )
return focus_list, data_featuremap
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__="average_pool" ):
"""simple docstring"""
lowerCAmelCase : int = len(featuremaps[0] )
lowerCAmelCase : Optional[int] = int(size_map / size_pooling )
lowerCAmelCase : str = []
for i_map in range(len(snake_case__ ) ):
lowerCAmelCase : Optional[int] = featuremaps[i_map]
lowerCAmelCase : int = []
for i_focus in range(0 , snake_case__ , snake_case__ ):
for j_focus in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(snake_case__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(snake_case__ ) )
lowerCAmelCase : str = np.asmatrix(snake_case__ ).reshape(snake_case__ , snake_case__ )
featuremap_pooled.append(snake_case__ )
return featuremap_pooled
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = []
for i in range(len(snake_case__ ) ):
lowerCAmelCase : str = np.shape(data[i] )
lowerCAmelCase : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCAmelCase : Tuple = data_listed.getA().tolist()[0]
data_expanded.extend(snake_case__ )
lowerCAmelCase : List[str] = np.asarray(snake_case__ )
return data_expanded
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = np.asarray(snake_case__ )
lowerCAmelCase : Dict = np.shape(snake_case__ )
lowerCAmelCase : Tuple = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = []
lowerCAmelCase : str = 0
for i_map in range(snake_case__ ):
lowerCAmelCase : int = np.ones((size_map, size_map) )
for i in range(0 , snake_case__ , snake_case__ ):
for j in range(0 , snake_case__ , snake_case__ ):
lowerCAmelCase : str = pd_pool[
i_pool
]
lowerCAmelCase : List[str] = i_pool + 1
lowerCAmelCase : Tuple = np.multiply(
snake_case__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(snake_case__ )
return pd_all
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=bool ):
"""simple docstring"""
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(snake_case__ )) )
print((" - - Shape: Teach_Data ", np.shape(snake_case__ )) )
lowerCAmelCase : Any = 0
lowerCAmelCase : Dict = []
lowerCAmelCase : Tuple = 10_000
while rp < n_repeat and mse >= error_accuracy:
lowerCAmelCase : str = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(snake_case__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCAmelCase : Dict = np.asmatrix(datas_train[p] )
lowerCAmelCase : str = np.asarray(datas_teach[p] )
                lowerCAmelCase , lowerCAmelCase = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : Union[str, Any] = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Tuple = np.shape(snake_case__ )
lowerCAmelCase : Optional[int] = self._expand(snake_case__ )
lowerCAmelCase : Optional[int] = data_bp_input
lowerCAmelCase : int = np.dot(snake_case__ , self.vji.T ) - self.thre_bpa
lowerCAmelCase : Tuple = self.sig(snake_case__ )
lowerCAmelCase : Union[str, Any] = np.dot(snake_case__ , self.wkj.T ) - self.thre_bpa
lowerCAmelCase : Optional[Any] = self.sig(snake_case__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCAmelCase : Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.multiply(
np.dot(snake_case__ , self.wkj ) , np.multiply(snake_case__ , (1 - bp_outa) ) )
lowerCAmelCase : str = np.dot(snake_case__ , self.vji )
lowerCAmelCase : Optional[int] = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCAmelCase : List[Any] = pd_conva_pooled.T.getA().tolist()
lowerCAmelCase : int = self._calculate_gradient_from_pool(
snake_case__ , snake_case__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCAmelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] )
lowerCAmelCase : Optional[Any] = self.rate_weight * np.dot(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCAmelCase : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCAmelCase : Any = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCAmelCase : Dict = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCAmelCase : int = self.thre_bpa - pd_k_all * self.rate_thre
lowerCAmelCase : Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCAmelCase : Optional[int] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCAmelCase : Union[str, Any] = rp + 1
lowerCAmelCase : Dict = error_count / patterns
all_mse.append(snake_case__ )
def draw_error():
lowerCAmelCase : List[str] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(snake_case__ , "+-" )
plt.plot(snake_case__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(snake_case__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(snake_case__ )) )
for p in range(len(snake_case__ ) ):
lowerCAmelCase : str = np.asmatrix(datas_test[p] )
            lowerCAmelCase , lowerCAmelCase = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : List[str] = self.pooling(snake_case__ , self.size_poolinga )
lowerCAmelCase : Dict = self._expand(snake_case__ )
lowerCAmelCase : Union[str, Any] = data_bp_input
lowerCAmelCase : List[str] = bp_outa * self.vji.T - self.thre_bpa
lowerCAmelCase : int = self.sig(snake_case__ )
lowerCAmelCase : Tuple = bp_outa * self.wkj.T - self.thre_bpa
lowerCAmelCase : Dict = self.sig(snake_case__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCAmelCase : Union[str, Any] = [list(map(self.do_round , snake_case__ ) ) for each in produce_out]
return np.asarray(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = np.asmatrix(snake_case__ )
        lowerCAmelCase , lowerCAmelCase = self.convolute(
snake_case__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCAmelCase : str = self.pooling(snake_case__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
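    # A minimal usage sketch (hypothetical shapes and argument order; the
    # guarded entry point is intentionally empty in the source). The pipeline
    # is convolve -> average-pool -> flatten -> two sigmoid BP layers:
    #
    #   cnn = CNN([3, 2, 1], 2, 8, 4, 2)   # 3x3 kernels, 2 maps, stride 1
    #   data = [np.random.rand(6, 6) for _ in range(4)]
    #   teach = [np.random.rand(2) for _ in range(4)]
    #   (training then alternates convolute/pooling with backpropagation)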
| 681 |
"""simple docstring"""
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array ) -> np.array:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
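    # A small numeric check (standard values, not from the original source):
    # sigmoid(0) == 0.5 and every output lies strictly in (0, 1).
    example = sigmoid(np.array([-1.0, 0.0, 1.0] ) )
    assert example[1] == 0.5 and np.all((0 < example) & (example < 1) )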
| 681 |
"""simple docstring"""
def pancake_sort(arr ):
    '''simple docstring'''
    cur = len(arr )
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse the first cur elements
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
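    # Worked example of one pass (illustrative, not from the source): for
    # [3, 1, 2] the max sits at index 0, so the first flip is a no-op and
    # flipping the first 3 elements gives [2, 1, 3]; the unsorted prefix then
    # shrinks until [1, 2, 3] remains.
    assert pancake_sort([3, 1, 2] ) == [1, 2, 3]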
| 681 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    '''simple docstring'''
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        ) )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores" , type=int , default=1 , help="Number of TPU cores to use (1 or 8)." )
    # positional
    parser.add_argument(
        "training_script" , type=str , help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ) , )
    # rest from the training program
    parser.add_argument("training_script_args" , nargs=REMAINDER )
    return parser.parse_args()
def main():
    '''simple docstring'''
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
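# Typical invocation (illustrative path and flags; the wrapped script must
# define an `_mp_fn(index)` entry point for xmp.spawn to call):
#
#   python this_launcher.py --num_cores 8 path/to/train_script.py --arg1 value1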
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
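# A minimal usage sketch (hypothetical checkpoint id; requires torch and
# network access): every model re-exported above shares the ModelMixin
# loading interface, e.g.
#
#   from diffusers.models import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("some/checkpoint", subfolder="vae")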
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int] ) -> list[int]:
    '''simple docstring'''
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
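    # A quick check (illustrative input, not from the source): passes over the
    # ones, tens, and hundreds digits leave the list fully ordered.
    assert radix_sort([170, 45, 75, 90, 2, 24] ) == [2, 24, 45, 75, 90, 170]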
| 681 |
"""simple docstring"""
import math
def perfect_square(num: int ) -> bool:
    '''simple docstring'''
    return math.sqrt(num ) * math.sqrt(num ) == num
def perfect_square_binary_search(n: int ) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
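    # Small illustrative checks (standard values): 16 is a perfect square and
    # 15 is not; the binary-search variant avoids floating-point sqrt entirely.
    assert perfect_square_binary_search(16 ) and not perfect_square_binary_search(15 )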
| 681 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest( SchedulerCommonTest ):
"""simple docstring"""
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
"""simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
lowerCAmelCase : List[Any] = self.dummy_sample
lowerCAmelCase : str = 0.1 * sample
lowerCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Optional[Any] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase : str = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Any = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self ):
"""simple docstring"""
pass
    def check_over_forward( self , time_step=0 , **kwargs ):
"""simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
lowerCAmelCase : Any = self.dummy_sample
lowerCAmelCase : List[Any] = 0.1 * sample
lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Dict = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : Optional[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Optional[Any] = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Union[str, Any] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **kwargs ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase : str = self.scheduler_classes[0]
lowerCAmelCase : List[Any] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : List[str] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : int = 10
lowerCAmelCase : Any = self.dummy_model()
lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Any = model(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[int] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop("num_inference_steps" , None )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : str = self.get_scheduler_config()
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : int = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case__ , "set_timesteps" ):
scheduler.set_timesteps(snake_case__ )
elif num_inference_steps is not None and not hasattr(snake_case__ , "set_timesteps" ):
lowerCAmelCase : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase : Optional[int] = scheduler.timesteps[5]
lowerCAmelCase : List[str] = scheduler.timesteps[6]
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : int = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase : Dict = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Tuple = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
lowerCAmelCase : str = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Dict = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Tuple = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=snake_case__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type="deis" , solver_order=snake_case__ , solver_type=snake_case__ , )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
lowerCAmelCase : Optional[Any] = self.full_loop(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
assert not torch.isnan(snake_case__ ).any(), "Samples have nan numbers"
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=snake_case__ )
self.check_over_configs(lower_order_final=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.full_loop()
lowerCAmelCase : Any = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.23916 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase : int = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : Optional[int] = self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 )
lowerCAmelCase : Optional[Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : Any = 10
lowerCAmelCase : List[str] = self.dummy_model()
lowerCAmelCase : List[str] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : str = model(snake_case__ , snake_case__ )
lowerCAmelCase : int = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
        assert sample.dtype == torch.float16
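# A minimal standalone sketch of the sampling loop the tests above exercise
# (hypothetical model and sample tensors; mirrors full_loop):
#
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)              # user-supplied denoiser
#       sample = scheduler.step(residual, t, sample).prev_sample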
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig( PretrainedConfig ):
"""simple docstring"""
a : Union[str, Any] ="vit"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , encoder_stride=16 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1e-4
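# A minimal usage sketch of the config above: with the defaults, a 224x224
# image split into 16x16 patches yields (224 // 16) ** 2 = 196 patch tokens
# plus one [CLS] token.
#
#   config = ViTConfig(image_size=224, patch_size=16)
#   num_patches = (config.image_size // config.patch_size) ** 2   # 196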
| 681 | 1 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def accuracy(out , labels ):
    '''simple docstring'''
    outputs = np.argmax(out , axis=1 )
    return np.sum(outputs == labels )
def load_rocstories_dataset(dataset_path ):
    '''simple docstring'''
    with open(dataset_path , encoding="utf_8" ) as f:
        f = csv.reader(f )
        output = []
        next(f )  # skip the first line
        for line in tqdm(f ):
            output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
    return output
def pre_process_datasets(encoded_datasets , input_len , cap_length , start_token , delimiter_token , clf_token ):
    '''simple docstring'''
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset )
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64 )
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        mc_labels = np.zeros((n_batch,) , dtype=np.int64 )
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset ):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1 )] = with_cont1
            input_ids[i, 1, : len(with_cont2 )] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1 ) - 1
            mc_token_ids[i, 1] = len(with_cont2 ) - 1
            lm_labels[i, 0, : len(with_cont1 )] = with_cont1
            lm_labels[i, 1, : len(with_cont2 )] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
    return tensor_datasets
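# Shape sketch for the tensors built above (illustrative numbers): with
# n_batch=2 stories and input_len=40, each dataset yields
#   input_ids    (2, 2, 40)  token ids for story + each continuation,
#   mc_token_ids (2, 2)      position of the [clf] token per choice,
#   lm_labels    (2, 2, 40)  same ids, -100 elsewhere (ignored by the LM loss),
#   mc_labels    (2,)        index of the correct continuation.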
def main():
'''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    print(args )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device , n_gpu ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    train_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.train_batch_size )
    eval_data = TensorDataset(*eval_tensor_dataset )
    eval_sampler = SequentialSampler(eval_data )
    eval_dataloader = DataLoader(eval_data , sampler=eval_sampler , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader , desc="Training" )
            for step, batch in enumerate(tqdm_bar ):
                batch = tuple(t.to(device ) for t in batch )
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids , mc_token_ids=mc_token_ids , lm_labels=lm_labels , mc_labels=mc_labels )
                loss = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir , "eval_results.txt" )
        with open(output_eval_file , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
def miller_rabin(n: int , allow_probable: bool = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
    bounds = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
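    # e.g. for n = 221, n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2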
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin():
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester :
"""simple docstring"""
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=32 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        """simple docstring"""
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values ):
        """simple docstring"""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest( ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
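# A minimal standalone sketch of the backbone API exercised above
# (illustrative checkpoint name; requires timm to be installed):
#
#   config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(torch.rand(1, 3, 224, 224)).feature_maps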
| 681 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def lowercase__ ( self , snake_case__ , snake_case__=False ):
"""simple docstring"""
if not batched:
lowerCAmelCase : Optional[Any] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
lowerCAmelCase , lowerCAmelCase : str = image.size
else:
lowerCAmelCase , lowerCAmelCase : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
lowerCAmelCase : Any = self.size["shortest_edge"]
elif w > h:
lowerCAmelCase : Union[str, Any] = self.size["shortest_edge"]
lowerCAmelCase : Optional[int] = int(self.size["shortest_edge"] * w / h )
else:
lowerCAmelCase : str = self.size["shortest_edge"]
lowerCAmelCase : Any = self.size["shortest_edge"]
else:
lowerCAmelCase : Optional[Any] = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item : item[0] )[0]
expected_width = max(expected_values , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Dict =DetrImageProcessor if is_vision_available() else None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = DetrImageProcessingTester(self )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_rescale" ) )
self.assertTrue(hasattr(snake_case__ , "rescale_factor" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "do_pad" ) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , True )
lowerCAmelCase : int = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , False )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : List[Any] = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
lowerCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase : int = image_processing(snake_case__ , return_tensors="pt" ).pixel_values
lowerCAmelCase , lowerCAmelCase : int = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCAmelCase : Tuple = json.loads(f.read() )
lowerCAmelCase : Dict = {"image_id": 39_769, "annotations": target}
# encode them
lowerCAmelCase : List[str] = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCAmelCase : Tuple = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase : Tuple = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
lowerCAmelCase : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
lowerCAmelCase : List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : str = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
lowerCAmelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify orig_size
lowerCAmelCase : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
lowerCAmelCase : Optional[int] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCAmelCase : Union[str, Any] = json.loads(f.read() )
lowerCAmelCase : Any = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
lowerCAmelCase : List[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCAmelCase : Any = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="pt" )
# verify pixel values
lowerCAmelCase : List[str] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , snake_case__ )
lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case__ , atol=1e-4 ) )
# verify area
lowerCAmelCase : Union[str, Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case__ ) )
# verify boxes
lowerCAmelCase : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case__ )
lowerCAmelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case__ , atol=1e-3 ) )
# verify image_id
lowerCAmelCase : int = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case__ ) )
# verify is_crowd
lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case__ ) )
# verify class_labels
lowerCAmelCase : Optional[int] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case__ ) )
# verify masks
lowerCAmelCase : str = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , snake_case__ )
# verify orig_size
lowerCAmelCase : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case__ ) )
# verify size
lowerCAmelCase : Tuple = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case__ ) )
| 681 |
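The `get_expected_values` helper in the tests above mirrors DETR's aspect-preserving resize: the shorter image side is scaled to `shortest_edge` and the other side follows proportionally (the `longest_edge` cap is ignored here for simplicity). A standalone sketch of that arithmetic; the helper name is ours:

def expected_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    # Scale the shorter side to `shortest_edge` while keeping the aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


# A 480x640 COCO image with shortest_edge=800 becomes 800x1066, matching
# the (1, 3, 800, 1066) pixel_values shape asserted in the slow tests above.
assert expected_resize(480, 640, 800) == (800, 1066)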
"""simple docstring"""
import argparse
import os
import re


PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Returns the indentation prefix of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Splits `code` into blocks of the given `indent_level`, between `start_prompt` and `end_prompt`."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wraps a key function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sorts a list of objects isort-style: constants first, then classes, then functions."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Sorts the imports listed inside one `_import_structure` statement."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sorts the `_import_structure` blocks of one init file (or only reports them if `check_only=True`)."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Runs `sort_imports` on every `__init__.py` under `PATH_TO_TRANSFORMERS`."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
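A quick demonstration of the ordering rule implemented by `sort_objects` above: constants (all caps) come first, then classes (initial capital), then functions, with case and underscores ignored inside each group:

names = ["load_model", "MODEL_LIST", "BaseLoader", "AutoModel", "VERSION", "run"]
print(sort_objects(names))
# ['MODEL_LIST', 'VERSION', 'AutoModel', 'BaseLoader', 'load_model', 'run']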
"""simple docstring"""
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Chain values in a deque so one slot can hold several entries.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Keep this slot while its chain still has room or another slot is free.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 681 |
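The class above layers separate chaining onto a base `HashTable`: each occupied slot holds a deque, `_set_value` pushes new entries onto the chain, and `_collision_resolution` keeps a slot in play until its chain is full and no empty slot remains. A self-contained miniature of the chaining idea with a fixed table and modulo hashing; the class name and its parameters are illustrative assumptions:

from collections import deque


class TinyChainedTable:
    def __init__(self, size: int, charge_factor: int = 2):
        self.size = size
        self.charge_factor = charge_factor  # maximum chain length per slot
        self.slots = [None] * size

    def insert(self, value: int) -> int:
        key = value % self.size  # modulo hashing
        if self.slots[key] is None:
            self.slots[key] = deque()
        if len(self.slots[key]) < self.charge_factor:
            self.slots[key].appendleft(value)
            return key
        raise OverflowError("chain at this slot is full")


table = TinyChainedTable(size=3)
for v in (3, 6, 4):
    table.insert(v)
print(table.slots)  # [deque([6, 3]), deque([4]), None]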
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(inputs_dict , config , model_class ):
model = model_class(config )
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_stages = self.model_tester.num_stages
self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
| 681 | 1 |
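The shape assertions in the ResNet tests above follow from the backbone's strides: the embedder downsamples by 4, and every later stage halves the spatial size, so the last stage ends at `image_size // 32`. A small helper making that arithmetic explicit (the function name is ours):

def resnet_spatial_sizes(image_size: int, num_stages: int = 4) -> list[int]:
    # Stem: /4, then each stage after the first halves the resolution.
    sizes = [image_size // 4]
    for _ in range(num_stages - 1):
        sizes.append(sizes[-1] // 2)
    return sizes


print(resnet_spatial_sizes(32))   # [8, 4, 2, 1] -> the 32x32 test images
print(resnet_spatial_sizes(224))  # [56, 28, 14, 7] -> 224 // 32 == 7 at the last stage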
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Union[str, Any] =WavaVecaPhonemeCTCTokenizer
a : List[str] =False
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase : Dict = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
lowerCAmelCase : int = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase : Union[str, Any] = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(snake_case__ ) + "\n" )
def lowercase__ ( self , snake_case__ , snake_case__=False , snake_case__=20 , snake_case__=5 ):
"""simple docstring"""
toks = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=False )) for i in range(len(tokenizer ) )]
toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , do_phonemize=False ) , toks ) )
if max_length is not None and len(snake_case__ ) > max_length:
lowerCAmelCase : List[str] = toks[:max_length]
if min_length is not None and len(snake_case__ ) < min_length and len(snake_case__ ) > 0:
while len(snake_case__ ) < min_length:
lowerCAmelCase : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase : str = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase : int = tokenizer.decode(snake_case__ , clean_up_tokenization_spaces=snake_case__ )
if " " not in output_txt and len(snake_case__ ) > 1:
lowerCAmelCase : Optional[int] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=snake_case__ )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=snake_case__ )
)
if with_prefix_space:
lowerCAmelCase : int = " " + output_txt
lowerCAmelCase : Optional[Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
return output_txt, output_ids
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
lowerCAmelCase : Any = tokenizer("m xxx ɪ" , do_phonemize=snake_case__ ).input_ids
self.assertEqual(snake_case__ , [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
lowerCAmelCase : Tuple = tokenizer("m aaa ɪ ccc" , do_phonemize=snake_case__ ).input_ids
self.assertEqual(snake_case__ , [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
lowerCAmelCase : Union[str, Any] = tokenizer("maɪ c" , do_phonemize=snake_case__ ).input_ids
self.assertEqual(snake_case__ , [3, 200] ) # mai should be <unk> (=3)
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase : Optional[Any] = "Hello how are you"
lowerCAmelCase : List[str] = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
self.assertEqual(snake_case__ , "h ə l oʊ h aʊ ɑːɹ j uː" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase : Any = "Hello how are you"
lowerCAmelCase : int = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(snake_case__ ).input_ids , tokenizer(snake_case__ , do_phonemize=snake_case__ ).input_ids )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase : Optional[Any] = "Hello how are you"
lowerCAmelCase : Dict = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
lowerCAmelCase : List[Any] = tokenizer.decode(tokenizer(snake_case__ ).input_ids )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
lowerCAmelCase : Tuple = tokenizer.decode(sample_ids[0] )
lowerCAmelCase : Tuple = tokenizer.batch_decode(snake_case__ )
self.assertEqual(snake_case__ , batch_tokens[0] )
self.assertEqual(snake_case__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase : Optional[int] = "Hello how are you"
lowerCAmelCase : Any = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
self.assertEqual(snake_case__ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase : Optional[int] = "Hello how are you"
lowerCAmelCase : str = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
self.assertEqual(tokenizer(snake_case__ ).input_ids , tokenizer(snake_case__ , do_phonemize=snake_case__ ).input_ids )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
lowerCAmelCase : Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
lowerCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] )
lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case__ )
self.assertEqual(snake_case__ , batch_tokens[0] )
self.assertEqual(snake_case__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
lowerCAmelCase : List[Any] = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=snake_case__ )
lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(snake_case__ , filter_word_delimiter_token=snake_case__ )
self.assertEqual(snake_case__ , batch_tokens[0] )
self.assertEqual(snake_case__ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase : List[Any] = "Hello how are you"
lowerCAmelCase : int = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
lowerCAmelCase : Optional[Any] = tokenizer.decode(tokenizer(snake_case__ ).input_ids , filter_word_delimiter_token=snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
tokenizer.add_tokens("|" )
lowerCAmelCase : Optional[int] = "Hello how are you"
lowerCAmelCase : List[Any] = tokenizer.phonemize(snake_case__ , phonemizer_lang="en-us" )
lowerCAmelCase : Optional[int] = tokenizer.decode(tokenizer(snake_case__ ).input_ids , filter_word_delimiter_token=snake_case__ )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=snake_case__ )
lowerCAmelCase : Optional[Any] = "Hello how are you"
lowerCAmelCase : Dict = tokenizer(snake_case__ , phonemizer_lang="en-us" ).input_ids
lowerCAmelCase : Tuple = tokenizer(snake_case__ , phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(snake_case__ , snake_case__ )
lowerCAmelCase : str = tokenizer.decode(snake_case__ )
lowerCAmelCase : Optional[Any] = tokenizer.decode(snake_case__ )
self.assertEqual(snake_case__ , "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(snake_case__ , "ɛ l o h aʊ a ʁ j u" )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
lowerCAmelCase : Optional[int] = "Hello how Are you"
lowerCAmelCase : Optional[Any] = "hello how are you"
lowerCAmelCase : Tuple = tokenizer(snake_case__ ).input_ids
lowerCAmelCase : Union[str, Any] = tokenizer(snake_case__ ).input_ids
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
lowerCAmelCase : Any = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case__ )
self.assertEqual(snake_case__ , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def get_from_offsets( offsets , key ):
"""simple docstring"""
retrieved_list = [d[key] for d in offsets]
return retrieved_list
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
lowerCAmelCase : Tuple = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
lowerCAmelCase : Optional[int] = tokenizer.decode(snake_case__ , output_char_offsets=snake_case__ , filter_word_delimiter_token=snake_case__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(snake_case__ , snake_case__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(outputs_batch , outputs_list ):
self.assertTrue(isinstance(outputs_batch , WavaVecaPhonemeCTCTokenizerOutput ) )
self.assertTrue(isinstance(outputs_list[0] , WavaVecaPhonemeCTCTokenizerOutput ) )
# transform list to ModelOutput
outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
def recursive_check(list_or_dict_a , list_or_dict_b ):
if isinstance(list_or_dict_a , list ):
[recursive_check(la , lb ) for la, lb in zip(list_or_dict_a , list_or_dict_b )]
self.assertEqual(list_or_dict_a , list_or_dict_b )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
# fmt: off
lowerCAmelCase : Dict = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(snake_case__ , output_char_offsets=snake_case__ )
lowerCAmelCase : Union[str, Any] = [tokenizer.decode(snake_case__ , output_char_offsets=snake_case__ ) for ids in sample_ids]
check_list_tuples_equal(snake_case__ , snake_case__ )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.get_tokenizers(do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase : Optional[Any] = tokenizer.vocab_size
lowerCAmelCase : Dict = len(snake_case__ )
self.assertNotEqual(snake_case__ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
lowerCAmelCase : str = ["aaaaa bbbbbb", "cccccccccdddddddd"]
lowerCAmelCase : Any = tokenizer.add_tokens(snake_case__ )
lowerCAmelCase : Dict = tokenizer.vocab_size
lowerCAmelCase : Optional[int] = len(snake_case__ )
self.assertNotEqual(snake_case__ , 0 )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , len(snake_case__ ) )
self.assertEqual(snake_case__ , all_size + len(snake_case__ ) )
lowerCAmelCase : int = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=snake_case__ )
self.assertGreaterEqual(len(snake_case__ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
lowerCAmelCase : int = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
lowerCAmelCase : Any = tokenizer.add_special_tokens(snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.vocab_size
lowerCAmelCase : List[str] = len(snake_case__ )
self.assertNotEqual(snake_case__ , 0 )
self.assertEqual(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , len(snake_case__ ) )
self.assertEqual(snake_case__ , all_size_a + len(snake_case__ ) )
lowerCAmelCase : str = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=snake_case__ )
self.assertGreaterEqual(len(snake_case__ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = self.get_tokenizers(fast=snake_case__ , do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase : str = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_string(snake_case__ )
self.assertIsInstance(output["text"] , snake_case__ )
| 681 |
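Several decode tests above depend on CTC-style post-processing: consecutive duplicate ids collapse to one, padding is dropped, and the word-delimiter token is optionally filtered out. A minimal sketch of that reduction over raw id sequences (not the tokenizer's actual implementation, just the rule the tests encode):

from itertools import groupby


def ctc_reduce(ids, pad_id=0, delim_id=None):
    # Collapse consecutive duplicates, then drop pad (and optionally the delimiter).
    out = [i for i, _ in groupby(ids) if i != pad_id]
    if delim_id is not None:
        out = [i for i in out if i != delim_id]
    return out


print(ctc_reduce([11, 5, 5, 15, 0, 15, 8, 8, 98]))  # [11, 5, 15, 15, 8, 98]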
"""simple docstring"""
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp) with d odd
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
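A worked instance of the decomposition n - 1 = d * 2**exp that the loop above computes, followed by one deterministic witness round using Python's built-in three-argument pow for modular exponentiation:

n = 221  # = 13 * 17, composite
d, exp = n - 1, 0
while d % 2 == 0:
    d //= 2
    exp += 1
assert (d, exp) == (55, 2)  # 220 = 55 * 2**2

a = 2
b = pow(a, d, n)     # 2**55 mod 221 = 128: not 1 and not n - 1
print(b)             # 128
print(pow(b, 2, n))  # 30: squaring never reaches n - 1, so a=2 proves 221 composite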
"""simple docstring"""
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx, left_element, right_element, a, b, val):
        """Assigns `val` on the interval [a, b] (1-indexed, inclusive)."""
        if self.flag[idx] is True:
            # Apply the pending assignment and push it down to the children.
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx, left_element, right_element, a, b):
        """Returns the maximum on the interval [a, b] in O(log n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
| 681 |
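A brute-force reference model is handy for sanity-checking the lazy segment tree above: the same assign-on-range / max-on-range semantics on a plain list, with the same 1-indexed inclusive bounds as the driver code:

class NaiveRangeMax:
    def __init__(self, a):
        self.a = list(a)

    def update(self, left, right, val):
        for i in range(left - 1, right):
            self.a[i] = val

    def query(self, left, right):
        return max(self.a[left - 1 : right])


ref = NaiveRangeMax([1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8])
print(ref.query(4, 6))   # 7
print(ref.query(7, 11))  # 14
ref.update(1, 3, 111)
print(ref.query(1, 15))  # 111 -- each output should match the SegmentTree above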
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
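# Minimal denoising-loop sketch for the scheduler above. The obfuscated method
# names are assumed to be the standard diffusers Flax ones (create_state,
# set_timesteps, step), and `unet_apply` stands for a user-provided model call:
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1_000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   for t in state.timesteps:
#       noise_pred = unet_apply(sample, t)
#       out = scheduler.step(state, noise_pred, t, sample, key=rng)
#       sample, state = out.prev_sample, out.state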
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : int | str ):
'''Return True if the number's decimal representation reads the same forwards and backwards.'''
lowerCAmelCase : Optional[Any] = str(SCREAMING_SNAKE_CASE )
return n == n[::-1]
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0_0_0_0 ):
'''Project Euler 36: sum all numbers below the given limit that are palindromic in both base 10 and base 2.'''
lowerCAmelCase : Any = 0
for i in range(1 , SCREAMING_SNAKE_CASE ):
if is_palindrome(SCREAMING_SNAKE_CASE ) and is_palindrome(bin(SCREAMING_SNAKE_CASE ).split("b" )[1] ):
total += i
return total
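# Below 10 the double-base palindromes are 1, 3, 5, 7 and 9
# (binary 1, 11, 101, 111, 1001), so solution(10) returns 25.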
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
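# (the copied keys presumably have their "first_stage_model." /
# "model.diffusion_model." prefixes stripped, since the load_state_dict calls
# below expect diffusers-style parameter names)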
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = torch.device('''cpu''')
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = val
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Tuple = []
for k in state_dict.keys():
lowerCAmelCase : Union[str, Any] = k
if ".pwconv" in k:
lowerCAmelCase : List[Any] = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
lowerCAmelCase : Any = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
lowerCAmelCase : Union[str, Any] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
lowerCAmelCase : str = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
lowerCAmelCase : Tuple = k_new.split("." )
if ls[2].isdigit():
lowerCAmelCase : Optional[Any] = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
lowerCAmelCase : List[str] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
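# Worked example: "network.0.0.dwconv.weight" is renamed to
# "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight".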
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
lowerCAmelCase : int = 1_0_0_0
lowerCAmelCase : List[Any] = "huggingface/label-files"
lowerCAmelCase : Optional[Any] = "imagenet-1k-id2label.json"
lowerCAmelCase : Optional[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
lowerCAmelCase : Tuple = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase : int = idalabel
lowerCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowerCAmelCase : Dict = [3, 3, 6, 4]
lowerCAmelCase : Dict = [4_8, 5_6, 1_1_2, 2_2_0]
elif swiftformer_name == "swiftformer_s":
lowerCAmelCase : List[str] = [3, 3, 9, 6]
lowerCAmelCase : Union[str, Any] = [4_8, 6_4, 1_6_8, 2_2_4]
elif swiftformer_name == "swiftformer_l1":
lowerCAmelCase : Tuple = [4, 3, 1_0, 5]
lowerCAmelCase : Any = [4_8, 9_6, 1_9_2, 3_8_4]
elif swiftformer_name == "swiftformer_l3":
lowerCAmelCase : Dict = [4, 4, 1_2, 6]
lowerCAmelCase : Any = [6_4, 1_2_8, 3_2_0, 5_1_2]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="cpu" , check_hash=SCREAMING_SNAKE_CASE )
else:
lowerCAmelCase : Dict = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
lowerCAmelCase : List[str] = checkpoint
lowerCAmelCase : str = create_rename_keys(SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCAmelCase : List[str] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(SCREAMING_SNAKE_CASE )
# prepare test inputs
lowerCAmelCase : List[str] = prepare_img()
lowerCAmelCase : Dict = ViTImageProcessor.from_pretrained("preprocessor_config" )
lowerCAmelCase : Tuple = processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# compare outputs from both models
lowerCAmelCase : str = get_expected_output(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_0_0_0] )
assert torch.allclose(hf_logits[0, 0:5] , SCREAMING_SNAKE_CASE , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
lowerCAmelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''Project Euler 1: return the sum of every multiple of 3 or 5 below the given bound.'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
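# e.g. solution(10) == 23, the sum of 3, 5, 6 and 9.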
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any=False ):
'''simple docstring'''
lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""module.blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""module.blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""module.blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""module.blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase : Optional[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any]=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase : str = ""
else:
lowerCAmelCase : List[Any] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Tuple = state_dict.pop(f"""module.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase : Union[str, Any] = state_dict.pop(f"""module.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : int = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase : List[str] = in_proj_bias[-config.hidden_size :]
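# The fused qkv projection of shape (3 * hidden_size, hidden_size) is sliced
# row-wise into separate query, key and value weights of shape
# (hidden_size, hidden_size) each, matching the HF attention layout.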
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : str = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = dct.pop(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = val
def a__ ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Any = ViTMSNConfig()
lowerCAmelCase : List[Any] = 1_0_0_0
lowerCAmelCase : Dict = "datasets/huggingface/label-files"
lowerCAmelCase : List[Any] = "imagenet-1k-id2label.json"
lowerCAmelCase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "r" ) )
lowerCAmelCase : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase : List[Any] = idalabel
lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowerCAmelCase : Any = 3_8_4
lowerCAmelCase : str = 1_5_3_6
lowerCAmelCase : List[Any] = 6
elif "l16" in checkpoint_url:
lowerCAmelCase : Optional[Any] = 1_0_2_4
lowerCAmelCase : List[str] = 4_0_9_6
lowerCAmelCase : Union[str, Any] = 2_4
lowerCAmelCase : Union[str, Any] = 1_6
lowerCAmelCase : Optional[Any] = 0.1
elif "b4" in checkpoint_url:
lowerCAmelCase : str = 4
elif "l7" in checkpoint_url:
lowerCAmelCase : Tuple = 7
lowerCAmelCase : Any = 1_0_2_4
lowerCAmelCase : int = 4_0_9_6
lowerCAmelCase : List[str] = 2_4
lowerCAmelCase : Any = 1_6
lowerCAmelCase : Any = 0.1
lowerCAmelCase : Tuple = ViTMSNModel(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="cpu" )["target_encoder"]
lowerCAmelCase : Optional[int] = ViTImageProcessor(size=config.image_size )
remove_projection_head(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[int] = create_rename_keys(SCREAMING_SNAKE_CASE , base_model=SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
read_in_q_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , base_model=SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Tuple = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
lowerCAmelCase : Optional[Any] = ViTImageProcessor(
size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
lowerCAmelCase : Dict = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
lowerCAmelCase : Optional[Any] = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowerCAmelCase : int = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
elif "b16" in checkpoint_url:
lowerCAmelCase : Optional[int] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
elif "l16" in checkpoint_url:
lowerCAmelCase : str = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
elif "b4" in checkpoint_url:
lowerCAmelCase : Tuple = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
else:
lowerCAmelCase : Union[str, Any] = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
lowerCAmelCase__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
lowerCAmelCase__ = '''path-to-your-trained-model'''
lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
lowerCAmelCase__ = pipe.to(device)
# to channels last
lowerCAmelCase__ = pipe.unet.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.vae.to(memory_format=torch.channels_last)
lowerCAmelCase__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
lowerCAmelCase__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
lowerCAmelCase__ = torch.randn(2, 4, 64, 64)
lowerCAmelCase__ = torch.rand(1) * 999
lowerCAmelCase__ = torch.randn(2, 77, 768)
lowerCAmelCase__ = (sample, timestep, encoder_hidden_status)
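# These example inputs mirror Stable Diffusion v1 UNet shapes (batch of 2 for
# classifier-free guidance, 4 latent channels, 64x64 latents, 77x768 text
# embeddings) so IPEX can specialize the traced graph for them.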
try:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
lowerCAmelCase__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
lowerCAmelCase__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
lowerCAmelCase__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
lowerCAmelCase__ = 666
lowerCAmelCase__ = torch.Generator(device).manual_seed(seed)
lowerCAmelCase__ = {'''generator''': generator}
if args.steps is not None:
lowerCAmelCase__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
lowerCAmelCase__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''Project Euler 20: return the sum of the digits of the factorial of the given number.'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
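# solution(10) == 27: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.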
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |
"""simple docstring"""
import re
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''Return True for a valid Indian mobile number: an optional +91 / 0 / 91 prefix followed by ten digits starting with 7, 8 or 9.'''
lowerCAmelCase : List[str] = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return match.string == phone
return False
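# indian_phone_validator("+91-9876543210")  -> True
# indian_phone_validator("123456789")       -> False (no valid 10-digit body)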
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
| 681 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
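# The in-place pointer reversal above runs in O(n) time and O(1) extra space.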
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def lowercase__ ( *snake_case__ , **snake_case__ ):
"""simple docstring"""
pass
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCAmelCase__ = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = pipeline(
"document-question-answering" , model=snake_case__ , tokenizer=snake_case__ , image_processor=snake_case__ )
lowerCAmelCase : Optional[int] = INVOICE_URL
lowerCAmelCase : int = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , "" ) ) )
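# apply_tesseract OCRs the image and returns the words with their normalized
# bounding boxes; zipping them yields the (word, box) pairs accepted by the
# pipeline's `word_boxes` argument.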
lowerCAmelCase : List[Any] = "What is the placebo?"
lowerCAmelCase : Optional[Any] = [
{
"image": load_image(snake_case__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = dqa_pipeline(snake_case__ , top_k=2 )
self.assertEqual(
snake_case__ , [
[
{"score": ANY(snake_case__ ), "answer": ANY(snake_case__ ), "start": ANY(snake_case__ ), "end": ANY(snake_case__ )},
{"score": ANY(snake_case__ ), "answer": ANY(snake_case__ ), "start": ANY(snake_case__ ), "end": ANY(snake_case__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCAmelCase : int = INVOICE_URL
lowerCAmelCase : Union[str, Any] = "How many cats are there?"
lowerCAmelCase : List[Any] = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCAmelCase : Any = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , snake_case__ )
lowerCAmelCase : List[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , snake_case__ )
# No text is detected in this image, so layoutlmv2 should fail and
# return an empty answer.
lowerCAmelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCAmelCase : Dict = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(snake_case__ , [] )
# We can optionally pass the words and bounding boxes directly
lowerCAmelCase : Dict = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCAmelCase : List[Any] = []
lowerCAmelCase : List[Any] = []
lowerCAmelCase : Optional[int] = dqa_pipeline(image=snake_case__ , question=snake_case__ , words=snake_case__ , boxes=snake_case__ , top_k=2 )
self.assertEqual(snake_case__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCAmelCase : Any = INVOICE_URL
lowerCAmelCase : Union[str, Any] = "What is the invoice number?"
lowerCAmelCase : Any = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase : Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCAmelCase : Dict = INVOICE_URL
lowerCAmelCase : str = "What is the invoice number?"
lowerCAmelCase : Union[str, Any] = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase : Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase : List[str] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=snake_case__ )
lowerCAmelCase : Union[str, Any] = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=snake_case__ , revision="3dc6de3" , )
lowerCAmelCase : List[str] = INVOICE_URL
lowerCAmelCase : Dict = "What is the invoice number?"
lowerCAmelCase : List[str] = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCAmelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCAmelCase : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCAmelCase : Optional[int] = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase : str = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=snake_case__ )
lowerCAmelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=snake_case__ , revision="3dc6de3" , max_seq_len=50 , )
lowerCAmelCase : Optional[Any] = INVOICE_URL
lowerCAmelCase : Optional[int] = "What is the invoice number?"
lowerCAmelCase : Optional[int] = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase : Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCAmelCase : Union[str, Any] = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase : List[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCAmelCase : List[Any] = INVOICE_URL
lowerCAmelCase : List[str] = "What is the invoice number?"
lowerCAmelCase : List[str] = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def lowercase__ ( self ):
"""simple docstring"""
pass
| 681 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
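# With _LazyModule the torch/TF submodules above are imported only on first
# attribute access; e.g. `from transformers import EfficientFormerModel`
# triggers just the torch branch (illustrative usage, assuming this file is
# the model package's __init__.py).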
| 681 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
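# Illustrative instantiation (keyword names assumed from the attribute
# assignments in __init__ above; the values are arbitrary):
#   config = InformerConfig(prediction_length=24, context_length=48, input_size=1)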
| 681 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = np.argmax(SCREAMING_SNAKE_CASE , axis=1 )
return np.sum(outputs == labels )
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf_8" ) as f:
lowerCAmelCase : Tuple = csv.reader(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = []
next(SCREAMING_SNAKE_CASE ) # skip the first line
for line in tqdm(SCREAMING_SNAKE_CASE ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
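# Each returned tuple is (four-sentence story, candidate ending 1,
# candidate ending 2, 0-based index of the correct ending).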
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[Any] = []
for dataset in encoded_datasets:
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
lowerCAmelCase : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa )
lowerCAmelCase : int = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
lowerCAmelCase : List[Any] = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, contb, mc_label),
) in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
lowerCAmelCase : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
lowerCAmelCase : Tuple = with_conta
lowerCAmelCase : Any = with_conta
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Dict = len(SCREAMING_SNAKE_CASE ) - 1
lowerCAmelCase : Optional[Any] = with_conta
lowerCAmelCase : List[Any] = with_conta
lowerCAmelCase : str = mc_label
lowerCAmelCase : Dict = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(SCREAMING_SNAKE_CASE ) for t in all_inputs ) )
return tensor_datasets
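# Every example is encoded twice, once per candidate ending, giving tensors of
# shape (n_batch, 2, input_len); mc_labels stores which of the two is correct.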
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=SCREAMING_SNAKE_CASE , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--eval_dataset" , type=SCREAMING_SNAKE_CASE , default="" )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE , default=4_2 )
parser.add_argument("--num_train_epochs" , type=SCREAMING_SNAKE_CASE , default=3 )
parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=8 )
parser.add_argument("--eval_batch_size" , type=SCREAMING_SNAKE_CASE , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=SCREAMING_SNAKE_CASE , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=SCREAMING_SNAKE_CASE , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=SCREAMING_SNAKE_CASE , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=SCREAMING_SNAKE_CASE , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--lm_coef" , type=SCREAMING_SNAKE_CASE , default=0.9 )
parser.add_argument("--n_valid" , type=SCREAMING_SNAKE_CASE , default=3_7_4 )
parser.add_argument("--server_ip" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=SCREAMING_SNAKE_CASE , default="" , help="Can be used for distant debugging." )
lowerCAmelCase : Tuple = parser.parse_args()
print(SCREAMING_SNAKE_CASE )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
lowerCAmelCase : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowerCAmelCase : Optional[int] = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
lowerCAmelCase : str = ["_start_", "_delimiter_", "_classify_"]
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) )
model.to(SCREAMING_SNAKE_CASE )
# Load and encode the datasets
def tokenize_and_encode(SCREAMING_SNAKE_CASE : Optional[Any] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(SCREAMING_SNAKE_CASE ) )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return obj
return [tokenize_and_encode(SCREAMING_SNAKE_CASE ) for o in obj]
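    # The recursion above replaces every string leaf in an arbitrarily nested structure
    # with its token-id list while passing integer labels through unchanged.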
logger.info("Encoding dataset..." )
lowerCAmelCase : Optional[Any] = load_rocstories_dataset(args.train_dataset )
lowerCAmelCase : int = load_rocstories_dataset(args.eval_dataset )
lowerCAmelCase : Tuple = (train_dataset, eval_dataset)
lowerCAmelCase : Dict = tokenize_and_encode(SCREAMING_SNAKE_CASE )
# Compute the max input length for the Transformer
lowerCAmelCase : Any = model.config.n_positions // 2 - 2
lowerCAmelCase : int = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
        for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
lowerCAmelCase : Union[str, Any] = min(SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
lowerCAmelCase : Any = pre_process_datasets(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Tuple = tensor_datasets[0], tensor_datasets[1]
lowerCAmelCase : List[str] = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = RandomSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size )
lowerCAmelCase : int = TensorDataset(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = SequentialSampler(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = DataLoader(SCREAMING_SNAKE_CASE , sampler=SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCAmelCase : int = args.max_steps
lowerCAmelCase : str = args.max_steps // (len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1
else:
lowerCAmelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCAmelCase : Dict = list(model.named_parameters() )
lowerCAmelCase : str = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
lowerCAmelCase : Tuple = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
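        # Weight decay is disabled for biases and LayerNorm parameters, the standard
        # parameter grouping when fine-tuning transformers with AdamW.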
lowerCAmelCase : Tuple = AdamW(SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCAmelCase : str = get_linear_schedule_with_warmup(
SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=SCREAMING_SNAKE_CASE )
if args.do_train:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
lowerCAmelCase : Optional[int] = 0
lowerCAmelCase : Union[str, Any] = 0
lowerCAmelCase : Tuple = tqdm(SCREAMING_SNAKE_CASE , desc="Training" )
for step, batch in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Tuple = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = batch
lowerCAmelCase : Optional[int] = model(SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCAmelCase : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCAmelCase : int = "Training loss: {:.2e} lr: {:.2e}".format(SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCAmelCase : Optional[int] = model.module if hasattr(SCREAMING_SNAKE_CASE , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCAmelCase : Any = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE )
torch.save(model_to_save.state_dict() , SCREAMING_SNAKE_CASE )
model_to_save.config.to_json_file(SCREAMING_SNAKE_CASE )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCAmelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCAmelCase : Dict = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(SCREAMING_SNAKE_CASE )
if args.do_eval:
model.eval()
lowerCAmelCase , lowerCAmelCase : Optional[int] = 0, 0
lowerCAmelCase , lowerCAmelCase : Any = 0, 0
for batch in tqdm(SCREAMING_SNAKE_CASE , desc="Evaluating" ):
lowerCAmelCase : List[Any] = tuple(t.to(SCREAMING_SNAKE_CASE ) for t in batch )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Tuple = batch
with torch.no_grad():
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE , mc_token_ids=SCREAMING_SNAKE_CASE , lm_labels=SCREAMING_SNAKE_CASE , mc_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = mc_logits.detach().cpu().numpy()
lowerCAmelCase : List[str] = mc_labels.to("cpu" ).numpy()
lowerCAmelCase : Any = accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCAmelCase : List[Any] = eval_loss / nb_eval_steps
lowerCAmelCase : List[Any] = eval_accuracy / nb_eval_examples
lowerCAmelCase : Tuple = tr_loss / nb_tr_steps if args.do_train else None
lowerCAmelCase : Any = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
lowerCAmelCase : List[str] = os.path.join(args.output_dir , "eval_results.txt" )
with open(SCREAMING_SNAKE_CASE , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , SCREAMING_SNAKE_CASE , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 681 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 681 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Optional[Any] ="informer"
a : int ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , snake_case__ = None , snake_case__ = None , snake_case__ = "student_t" , snake_case__ = "nll" , snake_case__ = 1 , snake_case__ = None , snake_case__ = "mean" , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = 64 , snake_case__ = 32 , snake_case__ = 32 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = 2 , snake_case__ = True , snake_case__ = "gelu" , snake_case__ = 0.05 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 0.1 , snake_case__ = 100 , snake_case__ = 0.02 , snake_case__=True , snake_case__ = "prob" , snake_case__ = 5 , snake_case__ = True , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = prediction_length
lowerCAmelCase : Union[str, Any] = context_length or prediction_length
lowerCAmelCase : List[Any] = distribution_output
lowerCAmelCase : Optional[int] = loss
lowerCAmelCase : Optional[int] = input_size
lowerCAmelCase : str = num_time_features
lowerCAmelCase : Any = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCAmelCase : Dict = scaling
lowerCAmelCase : List[str] = num_dynamic_real_features
lowerCAmelCase : Dict = num_static_real_features
lowerCAmelCase : Dict = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[str] = cardinality
else:
lowerCAmelCase : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(snake_case__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCAmelCase : List[Any] = embedding_dimension
else:
lowerCAmelCase : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCAmelCase : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCAmelCase : Any = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCAmelCase : str = d_model
lowerCAmelCase : List[str] = encoder_attention_heads
lowerCAmelCase : int = decoder_attention_heads
lowerCAmelCase : Optional[Any] = encoder_ffn_dim
lowerCAmelCase : Dict = decoder_ffn_dim
lowerCAmelCase : int = encoder_layers
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Tuple = dropout
lowerCAmelCase : List[Any] = attention_dropout
lowerCAmelCase : int = activation_dropout
lowerCAmelCase : Union[str, Any] = encoder_layerdrop
lowerCAmelCase : int = decoder_layerdrop
lowerCAmelCase : Optional[int] = activation_function
lowerCAmelCase : int = init_std
lowerCAmelCase : Optional[Any] = use_cache
# Informer
lowerCAmelCase : Dict = attention_type
lowerCAmelCase : Any = sampling_factor
lowerCAmelCase : Optional[int] = distil
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
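    # Added to input_size * len(lags_sequence) in `feature_size` above, this count fixes
    # the per-timestep input dimension fed to the encoder.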
| 681 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple ="deformable_detr"
a : List[str] ={
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , snake_case__=True , snake_case__=None , snake_case__=3 , snake_case__=300 , snake_case__=1_024 , snake_case__=6 , snake_case__=1_024 , snake_case__=8 , snake_case__=6 , snake_case__=1_024 , snake_case__=8 , snake_case__=0.0 , snake_case__=True , snake_case__="relu" , snake_case__=256 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1.0 , snake_case__=True , snake_case__=False , snake_case__="sine" , snake_case__="resnet50" , snake_case__=True , snake_case__=False , snake_case__=4 , snake_case__=4 , snake_case__=4 , snake_case__=False , snake_case__=300 , snake_case__=False , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=1 , snake_case__=1 , snake_case__=5 , snake_case__=2 , snake_case__=0.1 , snake_case__=0.25 , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCAmelCase : str = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(snake_case__ , snake_case__ ):
lowerCAmelCase : Any = backbone_config.get("model_type" )
lowerCAmelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase : List[str] = config_class.from_dict(snake_case__ )
lowerCAmelCase : Any = use_timm_backbone
lowerCAmelCase : str = backbone_config
lowerCAmelCase : Any = num_channels
lowerCAmelCase : Optional[Any] = num_queries
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Dict = d_model
lowerCAmelCase : Optional[int] = encoder_ffn_dim
lowerCAmelCase : List[Any] = encoder_layers
lowerCAmelCase : Union[str, Any] = encoder_attention_heads
lowerCAmelCase : Any = decoder_ffn_dim
lowerCAmelCase : Union[str, Any] = decoder_layers
lowerCAmelCase : Dict = decoder_attention_heads
lowerCAmelCase : List[Any] = dropout
lowerCAmelCase : List[str] = attention_dropout
lowerCAmelCase : Dict = activation_dropout
lowerCAmelCase : Optional[Any] = activation_function
lowerCAmelCase : List[Any] = init_std
lowerCAmelCase : Tuple = init_xavier_std
lowerCAmelCase : Dict = encoder_layerdrop
lowerCAmelCase : List[str] = auxiliary_loss
lowerCAmelCase : List[str] = position_embedding_type
lowerCAmelCase : Dict = backbone
lowerCAmelCase : List[Any] = use_pretrained_backbone
lowerCAmelCase : int = dilation
# deformable attributes
lowerCAmelCase : Optional[int] = num_feature_levels
lowerCAmelCase : Optional[Any] = encoder_n_points
lowerCAmelCase : Any = decoder_n_points
lowerCAmelCase : int = two_stage
lowerCAmelCase : Union[str, Any] = two_stage_num_proposals
lowerCAmelCase : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowerCAmelCase : List[Any] = class_cost
lowerCAmelCase : Dict = bbox_cost
lowerCAmelCase : Optional[Any] = giou_cost
# Loss coefficients
lowerCAmelCase : Union[str, Any] = mask_loss_coefficient
lowerCAmelCase : Dict = dice_loss_coefficient
lowerCAmelCase : Tuple = bbox_loss_coefficient
lowerCAmelCase : List[Any] = giou_loss_coefficient
lowerCAmelCase : Tuple = eos_coefficient
lowerCAmelCase : Optional[int] = focal_alpha
lowerCAmelCase : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
"""simple docstring"""
return self.d_model
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowerCAmelCase : Any = self.backbone_config.to_dict()
lowerCAmelCase : List[Any] = self.__class__.model_type
return output
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if num < 0:
return False
lowerCAmelCase : int = num
lowerCAmelCase : int = 0
while num > 0:
lowerCAmelCase : Dict = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
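# Minimal sanity checks (illustrative values, not part of the original file):
#
#     >>> a__(121)
#     True
#     >>> a__(123)
#     False
#     >>> a__(-121)
#     False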
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
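# Worked example (assumed values): by the mass-action law n * p = n_i**2, calling the
# function above with electron_conc = 25, hole_conc = 0 and intrinsic_conc = 10 solves
# for the missing quantity and returns ("hole_conc", 4.0), since 10**2 / 25 == 4.0.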
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCAmelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCAmelCase__ = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
lowerCAmelCase : List[str] = self.diffusers_dir
shutil.copy(
os.path.join(snake_case__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
lowerCAmelCase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
lowerCAmelCase : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
lowerCAmelCase : int = black.format_str(snake_case__ , mode=snake_case__ )
lowerCAmelCase : Dict = os.path.join(self.diffusers_dir , "new_code.py" )
with open(snake_case__ , "w" , newline="\n" ) as f:
f.write(snake_case__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(snake_case__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=snake_case__ )
with open(snake_case__ , "r" ) as f:
self.assertTrue(f.read() , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , snake_case__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , snake_case__ ) , )
# Copy consistency with a really long name
lowerCAmelCase : Union[str, Any] = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , snake_case__ , snake_case__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , snake_case__ , overwrite_result=re.sub("DDPM" , "Test" , snake_case__ ) , )
| 681 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCAmelCase : List[str] = 1_9_2
lowerCAmelCase : Union[str, Any] = 7_6_8
lowerCAmelCase : Optional[Any] = 1_2
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Tuple = [8_0_0, 1_3_3_3]
lowerCAmelCase : Tuple = False
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase : Dict = 3_3_0
lowerCAmelCase : List[Any] = 1_4
lowerCAmelCase : Tuple = 6
lowerCAmelCase : Optional[Any] = 1_3_2_0
elif "yolos_s" in yolos_name:
lowerCAmelCase : Optional[int] = 3_8_4
lowerCAmelCase : str = 1_5_3_6
lowerCAmelCase : Optional[int] = 1_2
lowerCAmelCase : Union[str, Any] = 6
elif "yolos_b" in yolos_name:
lowerCAmelCase : Dict = [8_0_0, 1_3_4_4]
lowerCAmelCase : int = 9_1
lowerCAmelCase : Optional[int] = "huggingface/label-files"
lowerCAmelCase : List[str] = "coco-detection-id2label.json"
lowerCAmelCase : List[Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
lowerCAmelCase : Tuple = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase : Optional[Any] = idalabel
lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
return config
def a__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosConfig , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase : Any = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase : List[str] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase : List[str] = in_proj_bias[: config.hidden_size]
lowerCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase : List[Any] = in_proj_weight[-config.hidden_size :, :]
lowerCAmelCase : Tuple = in_proj_bias[-config.hidden_size :]
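# The timm checkpoint stores query/key/value as one fused (3 * hidden_size, hidden_size)
# projection per block; the loop above slices that matrix (and its bias) into the three
# separate tensors the Hugging Face implementation expects.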
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if "backbone" in name:
lowerCAmelCase : str = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowerCAmelCase : Any = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowerCAmelCase : Any = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowerCAmelCase : Dict = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowerCAmelCase : List[str] = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowerCAmelCase : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowerCAmelCase : List[Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowerCAmelCase : List[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCAmelCase : int = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCAmelCase : Tuple = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCAmelCase : Dict = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCAmelCase : str = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCAmelCase : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowerCAmelCase : Any = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowerCAmelCase : Any = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowerCAmelCase : List[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def a__ ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : YolosForObjectDetection ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
lowerCAmelCase : Dict = key.split("." )
lowerCAmelCase : Dict = int(key_split[2] )
lowerCAmelCase : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCAmelCase : Tuple = val[:dim, :]
lowerCAmelCase : List[str] = val[
dim : dim * 2, :
]
lowerCAmelCase : Optional[Any] = val[-dim:, :]
else:
lowerCAmelCase : Dict = val[:dim]
lowerCAmelCase : Union[str, Any] = val[dim : dim * 2]
lowerCAmelCase : Tuple = val[-dim:]
else:
lowerCAmelCase : Any = val
return orig_state_dict
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
lowerCAmelCase : Tuple = get_yolos_config(SCREAMING_SNAKE_CASE )
# load original state_dict
lowerCAmelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
# load 🤗 model
lowerCAmelCase : Tuple = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase : List[Any] = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCAmelCase : Optional[int] = 8_0_0 if yolos_name != "yolos_ti" else 5_1_2
lowerCAmelCase : Dict = YolosImageProcessor(format="coco_detection" , size=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = image_processor(images=prepare_img() , return_tensors="pt" )
lowerCAmelCase : Any = model(**SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = outputs.logits, outputs.pred_boxes
lowerCAmelCase , lowerCAmelCase : List[str] = None, None
if yolos_name == "yolos_ti":
lowerCAmelCase : Optional[int] = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
lowerCAmelCase : List[str] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
lowerCAmelCase : Tuple = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
lowerCAmelCase : List[Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
lowerCAmelCase : List[Any] = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
lowerCAmelCase : Tuple = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(f"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
lowerCAmelCase : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowerCAmelCase : Union[str, Any] = model_mapping[yolos_name]
image_processor.push_to_hub(SCREAMING_SNAKE_CASE , organization="hustvl" )
model.push_to_hub(SCREAMING_SNAKE_CASE , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 681 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(SCREAMING_SNAKE_CASE ) - len(SCREAMING_SNAKE_CASE ) + 1 ):
lowerCAmelCase : int = [x.match(SCREAMING_SNAKE_CASE ) for x, y in zip(SCREAMING_SNAKE_CASE , ks[i:] )]
if matches and all(SCREAMING_SNAKE_CASE ):
return True
return False
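# Example (hypothetical parameter path): matching the query ("mlp", "c_fc", "kernel")
# against the key ("transformer", "h", "0", "mlp", "c_fc", "kernel") returns True,
# because the fully-anchored patterns match a contiguous window of the key tuple.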
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def replace(SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
for rule, replacement in rules:
if _match(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return replacement
return val
return replace
def a__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
(("transformer", "wte", "embedding"), P("mp" , SCREAMING_SNAKE_CASE )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(SCREAMING_SNAKE_CASE , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , SCREAMING_SNAKE_CASE )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
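# Each rule pairs a parameter-path pattern with a jax PartitionSpec: a spec whose first
# entry is "mp" shards that axis over the model-parallel mesh axis, while a rule mapping
# to None leaves the tensor fully replicated.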
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase : Any = _get_partition_rules()
lowerCAmelCase : Tuple = _replacement_rules(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = {k: _unmatched for k in flatten_dict(SCREAMING_SNAKE_CASE )}
lowerCAmelCase : List[Any] = {k: replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(SCREAMING_SNAKE_CASE ) )
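# Usage sketch (hypothetical): applying the function above to a flax parameter tree
# yields a frozen tree of PartitionSpecs with the same structure; the assert guarantees
# that every parameter path matched a rule.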
| 681 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : str =["image_processor", "tokenizer"]
a : Dict ="BlipImageProcessor"
a : Any =("BertTokenizer", "BertTokenizerFast")
def __init__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = False
super().__init__(snake_case__ , snake_case__ )
lowerCAmelCase : int = self.image_processor
def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowerCAmelCase : str = self.tokenizer
lowerCAmelCase : Dict = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
return text_encoding
# add pixel_values
lowerCAmelCase : List[Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ )
if text is not None:
lowerCAmelCase : Optional[int] = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
else:
lowerCAmelCase : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(snake_case__ )
return encoding_image_processor
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def lowercase__ ( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.tokenizer.model_input_names
lowerCAmelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0 , SCREAMING_SNAKE_CASE : int = 2_2 ):
'''simple docstring'''
lowerCAmelCase : Dict = range(1 , SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = range(1 , SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
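# Worked example: 9**5 = 59049 is a five-digit fifth power, so it is counted; any base of
# 10 or more can never qualify, because 10**n already has n + 1 digits.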
if __name__ == "__main__":
print(F"{solution(10, 22) = }")
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
lowerCAmelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a__ ( SCREAMING_SNAKE_CASE : list[float] ):
'''simple docstring'''
lowerCAmelCase : Tuple = []
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : float = -1
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if arr[i] < arr[j]:
lowerCAmelCase : int = arr[j]
break
result.append(SCREAMING_SNAKE_CASE )
return result
def a__ ( SCREAMING_SNAKE_CASE : list[float] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = []
for i, outer in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowerCAmelCase : List[str] = inner
break
result.append(SCREAMING_SNAKE_CASE )
return result
def a__ ( SCREAMING_SNAKE_CASE : list[float] ):
'''simple docstring'''
lowerCAmelCase : Tuple = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : list[float] = []
lowerCAmelCase : list[float] = [-1] * arr_size
for index in reversed(range(SCREAMING_SNAKE_CASE ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowerCAmelCase : int = stack[-1]
stack.append(arr[index] )
return result
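# The stack-based version runs in O(n): each element is pushed once and popped at most
# once, versus the O(n^2) double loops of the two reference implementations above.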
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
lowerCAmelCase__ = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = len(SCREAMING_SNAKE_CASE )
while cur > 1:
# Find the maximum number in arr
lowerCAmelCase : List[str] = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
lowerCAmelCase : str = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE )]
# Reverse whole list
lowerCAmelCase : str = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE )]
cur -= 1
return arr
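# Example trace (illustrative): for [2, 3, 1] the maximum 3 is flipped to the front
# ([3, 2, 1]) and the whole active prefix is reversed ([1, 2, 3]); each pass then
# shrinks the prefix by one, parking the current maximum at its end.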
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 681 | 1 |
"""simple docstring"""
import string
import numpy
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : Union[str, Any] =string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
a : List[str] =numpy.vectorize(lambda lowercase : x % 36 )
a : Union[str, Any] =numpy.vectorize(lowercase )
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.modulus(snake_case__ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowerCAmelCase : str = encrypt_key.shape[0]
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.key_string.index(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
return self.key_string[round(snake_case__ )]
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase : Optional[Any] = det % len(self.key_string )
lowerCAmelCase : Dict = len(self.key_string )
if greatest_common_divisor(snake_case__ , len(self.key_string ) ) != 1:
lowerCAmelCase : str = (
f"""determinant modular {req_l} of encryption key({det}) """
f"""is not co prime w.r.t {req_l}.\nTry another key."""
)
raise ValueError(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Any = [char for char in text.upper() if char in self.key_string]
lowerCAmelCase : int = chars[-1]
while len(snake_case__ ) % self.break_key != 0:
chars.append(snake_case__ )
return "".join(snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.process_text(text.upper() )
lowerCAmelCase : Dict = ""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
lowerCAmelCase : Tuple = text[i : i + self.break_key]
lowerCAmelCase : Tuple = [self.replace_letters(snake_case__ ) for char in batch]
lowerCAmelCase : Dict = numpy.array([vec] ).T
lowerCAmelCase : List[str] = self.modulus(self.encrypt_key.dot(snake_case__ ) ).T.tolist()[
0
]
lowerCAmelCase : int = "".join(
self.replace_digits(snake_case__ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowerCAmelCase : int = det % len(self.key_string )
lowerCAmelCase : Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowerCAmelCase : Union[str, Any] = i
break
lowerCAmelCase : Optional[int] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(snake_case__ ) )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : int = self.make_decrypt_key()
lowerCAmelCase : Tuple = self.process_text(text.upper() )
lowerCAmelCase : List[str] = ""
for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ):
lowerCAmelCase : List[str] = text[i : i + self.break_key]
lowerCAmelCase : int = [self.replace_letters(snake_case__ ) for char in batch]
lowerCAmelCase : Union[str, Any] = numpy.array([vec] ).T
lowerCAmelCase : Optional[Any] = self.modulus(decrypt_key.dot(snake_case__ ) ).T.tolist()[0]
lowerCAmelCase : int = "".join(
self.replace_digits(snake_case__ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
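# Minimal usage sketch (2x2 key chosen so det = 7 is coprime with 36; values are
# illustrative, not from the original file):
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     token = cipher.encrypt("testing hill cipher")
#     assert cipher.decrypt(token).startswith("TESTINGHILLCIPHER")  # padded to block size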
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[Any] = int(input("Enter the order of the encryption key: " ) )
lowerCAmelCase : int = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Dict = [int(SCREAMING_SNAKE_CASE ) for x in input().split()]
hill_matrix.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = HillCipher(numpy.array(SCREAMING_SNAKE_CASE ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
lowerCAmelCase : int = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
lowerCAmelCase : Optional[int] = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(SCREAMING_SNAKE_CASE ) )
elif option == "2":
lowerCAmelCase : Union[str, Any] = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 681 |
"""simple docstring"""
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 681 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
| 681 |
"""simple docstring"""
import math
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return math.sqrt(SCREAMING_SNAKE_CASE ) * math.sqrt(SCREAMING_SNAKE_CASE ) == num
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : Dict = 0
lowerCAmelCase : List[str] = n
while left <= right:
lowerCAmelCase : str = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCAmelCase : int = mid - 1
else:
lowerCAmelCase : int = mid + 1
return False
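# Quick checks (illustrative): the binary search over [0, n] finds 16 -> True at mid = 4,
# while 15 narrows to an empty interval and returns False. Unlike the math.sqrt variant
# above, which can misreport very large integers due to float rounding, this search is exact.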
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a : str =JukeboxTokenizer
a : str ={
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase : Optional[Any] = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
lowerCAmelCase : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
lowerCAmelCase : Dict = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def lowercase__ ( self ):
"""simple docstring"""
import torch
lowerCAmelCase : Optional[int] = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
lowerCAmelCase : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
lowerCAmelCase : Any = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 681 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Union[str, Any] ="vit"
def __init__( self , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.0 , snake_case__=0.0 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=224 , snake_case__=16 , snake_case__=3 , snake_case__=True , snake_case__=16 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**snake_case__ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = num_hidden_layers
lowerCAmelCase : List[Any] = num_attention_heads
lowerCAmelCase : Union[str, Any] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase : List[str] = initializer_range
lowerCAmelCase : Optional[Any] = layer_norm_eps
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : Optional[Any] = patch_size
lowerCAmelCase : Tuple = num_channels
lowerCAmelCase : Optional[int] = qkv_bias
lowerCAmelCase : str = encoder_stride
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] =version.parse("1.11" )
@property
def lowercase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase__ ( self ):
"""simple docstring"""
return 1e-4
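# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration). The two classes above are
# identifier-scrambled mirrors of the public `transformers.ViTConfig` and its
# ONNX config; the import below uses the real library API under that
# assumption, and is guarded so it only runs when executed directly.
if __name__ == "__main__":
    from transformers import ViTConfig

    cfg = ViTConfig()  # defaults match the __init__ above: 12 layers, 768 hidden
    # a 224x224 input with 16x16 patches yields (224 // 16) ** 2 = 196 patches
    print(cfg.hidden_size, (cfg.image_size // cfg.patch_size) ** 2)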
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : List[Any] ="bert-generation"
def __init__( self , snake_case__=50_358 , snake_case__=1_024 , snake_case__=24 , snake_case__=16 , snake_case__=4_096 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=2 , snake_case__=1 , snake_case__="absolute" , snake_case__=True , **snake_case__ , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
lowerCAmelCase : Any = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = hidden_act
lowerCAmelCase : Any = intermediate_size
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : str = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Union[str, Any] = position_embedding_type
lowerCAmelCase : int = use_cache
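# ---------------------------------------------------------------------------
# Hedged usage sketch (added): the class above mirrors the public
# `transformers.BertGenerationConfig` (identifiers scrambled), so the real
# API is used below under that assumption.
if __name__ == "__main__":
    from transformers import BertGenerationConfig

    cfg = BertGenerationConfig()  # defaults match the __init__ above
    print(cfg.vocab_size, cfg.num_hidden_layers)  # expected: 50358 24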
| 681 |
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 681 | 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def a__ ( SCREAMING_SNAKE_CASE : str = "laptop" ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase : str = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
lowerCAmelCase : Union[str, Any] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE , headers=SCREAMING_SNAKE_CASE ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
lowerCAmelCase : Any = item.ha.text
lowerCAmelCase : List[str] = "https://www.amazon.in/" + item.ha.a["href"]
lowerCAmelCase : Tuple = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
lowerCAmelCase : List[Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
lowerCAmelCase : List[Any] = "Not available"
try:
lowerCAmelCase : Any = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
lowerCAmelCase : Tuple = ""
try:
lowerCAmelCase : int = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase : Tuple = float("nan" )
except AttributeError:
pass
lowerCAmelCase : Tuple = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase : int = " "
lowerCAmelCase : List[Any] = " "
data_frame.index += 1
return data_frame
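# ---------------------------------------------------------------------------
# Hedged worked example (added): the discount computed inside the loop above
# is (MRP - price) / MRP * 100. A tiny self-contained helper makes the
# arithmetic explicit (the helper name is invented for this illustration).
def _discount_percent(mrp: float, price: float) -> float:
    """Percentage discount of `price` relative to `mrp`."""
    return (mrp - price) / mrp * 100


assert _discount_percent(1_000.0, 750.0) == 25.0  # ₹1,000 MRP at ₹750 -> 25% off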
if __name__ == "__main__":
lowerCAmelCase__ = '''headphones'''
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
| 681 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__="resnet50" , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=True , snake_case__=True , ):
"""simple docstring"""
lowerCAmelCase : List[str] = parent
lowerCAmelCase : Union[str, Any] = out_indices if out_indices is not None else [4]
lowerCAmelCase : Tuple = stage_names
lowerCAmelCase : Any = out_features
lowerCAmelCase : Any = backbone
lowerCAmelCase : Union[str, Any] = batch_size
lowerCAmelCase : Optional[int] = image_size
lowerCAmelCase : List[str] = num_channels
lowerCAmelCase : int = use_pretrained_backbone
lowerCAmelCase : Tuple = is_training
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def lowercase__ ( self ):
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = TimmBackbone(config=snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
self.parent.assertEqual(
result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[int] =(TimmBackbone,) if is_torch_available() else ()
a : Union[str, Any] ={"feature-extraction": TimmBackbone} if is_torch_available() else {}
a : Tuple =False
a : List[Any] =False
a : Optional[Any] =False
a : Dict =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TimmBackboneModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = "resnet18"
lowerCAmelCase : str = "microsoft/resnet-18"
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ )
lowerCAmelCase : List[str] = AutoBackbone.from_pretrained(snake_case__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case__ , use_timm_backbone=snake_case__ , out_indices=[1, 2, 3] )
lowerCAmelCase : List[Any] = AutoBackbone.from_pretrained(snake_case__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Safetensors is not supported by timm." )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
lowerCAmelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : int = True
lowerCAmelCase : str = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Optional[int] = self.all_model_classes[0]
lowerCAmelCase : Union[str, Any] = model_class(snake_case__ )
model.to(snake_case__ )
lowerCAmelCase : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = model(**snake_case__ )
lowerCAmelCase : Tuple = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Optional[int] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : List[str] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : Dict = copy.deepcopy(snake_case__ )
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Dict = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**snake_case__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[int] = copy.deepcopy(snake_case__ )
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
lowerCAmelCase : Optional[Any] = model(**snake_case__ )
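# ---------------------------------------------------------------------------
# Hedged usage sketch (added): the out_indices behaviour exercised by the
# tests above, written as a plain script. `AutoBackbone.from_pretrained` and
# the two checkpoint names are taken directly from the test itself; running
# this requires the timm package at runtime.
if __name__ == "__main__":
    from transformers import AutoBackbone

    timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
    # timm backbones default to out_indices=(-1,); transformers backbones to
    # [len(stage_names) - 1], kept for backward compatibility
    print(timm_backbone.out_indices, hf_backbone.out_indices)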
| 681 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple =(DPMSolverSinglestepScheduler,)
a : List[str] =(("num_inference_steps", 25),)
def lowercase__ ( self , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = {
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float("inf" ),
"variance_type": None,
}
config.update(**snake_case__ )
return config
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
lowerCAmelCase : Dict = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : Tuple = self.dummy_sample
lowerCAmelCase : Tuple = 0.1 * sample
lowerCAmelCase : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : str = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : str = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(snake_case__ )
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals
lowerCAmelCase : List[str] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase , lowerCAmelCase : Dict = sample, sample
for t in range(snake_case__ , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Dict = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self , snake_case__=0 , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
lowerCAmelCase : Dict = kwargs.pop("num_inference_steps" , snake_case__ )
lowerCAmelCase : int = self.dummy_sample
lowerCAmelCase : List[str] = 0.1 * sample
lowerCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
scheduler.set_timesteps(snake_case__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case__ )
lowerCAmelCase : List[str] = scheduler_class.from_pretrained(snake_case__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase : Optional[int] = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
lowerCAmelCase : Union[str, Any] = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ ( self , snake_case__=None , **snake_case__ ):
"""simple docstring"""
if scheduler is None:
lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase : Union[str, Any] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : int = scheduler_class(**snake_case__ )
lowerCAmelCase : Dict = self.scheduler_classes[0]
lowerCAmelCase : Optional[Any] = self.get_scheduler_config(**snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler_class(**snake_case__ )
lowerCAmelCase : Any = 10
lowerCAmelCase : Optional[Any] = self.dummy_model()
lowerCAmelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : Optional[Any] = model(snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
return sample
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase : List[Any] = 50
lowerCAmelCase : List[Any] = self.dummy_model()
lowerCAmelCase : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case__ )
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
lowerCAmelCase : List[Any] = model(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
lowerCAmelCase : str = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
lowerCAmelCase : Dict = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : Dict = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase : List[str] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase : Optional[int] = self.full_loop(scheduler=snake_case__ )
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=snake_case__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , algorithm_type="dpmsolver++" , solver_order=snake_case__ , solver_type=snake_case__ , )
def lowercase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
lowerCAmelCase : Union[str, Any] = self.full_loop(
solver_order=snake_case__ , solver_type=snake_case__ , prediction_type=snake_case__ , algorithm_type=snake_case__ , )
assert not torch.isnan(snake_case__ ).any(), "Samples have nan numbers"
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(lower_order_final=snake_case__ )
self.check_over_configs(lower_order_final=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("inf" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowercase__ ( self ):
"""simple docstring"""
self.check_over_configs(variance_type=snake_case__ )
self.check_over_configs(variance_type="learned_range" )
def lowercase__ ( self ):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=snake_case__ , time_step=0 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.full_loop()
lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.full_loop(use_karras_sigmas=snake_case__ )
lowerCAmelCase : str = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = self.full_loop(prediction_type="v_prediction" , use_karras_sigmas=snake_case__ )
lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = self.scheduler_classes[0]
lowerCAmelCase : List[str] = self.get_scheduler_config(thresholding=snake_case__ , dynamic_thresholding_ratio=0 )
lowerCAmelCase : Any = scheduler_class(**snake_case__ )
lowerCAmelCase : List[str] = 10
lowerCAmelCase : Union[str, Any] = self.dummy_model()
lowerCAmelCase : List[Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase : int = model(snake_case__ , snake_case__ )
lowerCAmelCase : List[Any] = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
assert sample.dtype == torch.floataa
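# ---------------------------------------------------------------------------
# Hedged sketch (added): the sampling loop these tests exercise, in plain
# form. DPMSolverSinglestepScheduler is the real diffusers class imported
# above; the random sample and the zero "residual" are stand-ins for a model.
if __name__ == "__main__":
    demo_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1_000)
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        residual = torch.zeros_like(demo_sample)  # a real UNet would predict noise here
        demo_sample = demo_scheduler.step(residual, t, demo_sample).prev_sample
    print(demo_sample.shape)  # torch.Size([1, 3, 8, 8])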
| 681 |
"""simple docstring"""
import argparse
import os
import re
lowerCAmelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(r'''^(\s*)\S''')
# Pattern that matches `"key":` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(r'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(r'''\[([^\]]+)\]''')
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int]="" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Optional[int] = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase : Dict = ["\n".join(lines[:index] )]
else:
lowerCAmelCase : Any = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase : List[str] = [lines[index + 1]]
index += 1
else:
lowerCAmelCase : Optional[Any] = []
else:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
def _inner(SCREAMING_SNAKE_CASE : Optional[Any] ):
return key(SCREAMING_SNAKE_CASE ).lower().replace("_" , "" )
return _inner
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=None ):
'''simple docstring'''
def noop(SCREAMING_SNAKE_CASE : List[Any] ):
return x
if key is None:
lowerCAmelCase : int = noop
# Constants are all uppercase, they go first.
lowerCAmelCase : Dict = [obj for obj in objects if key(SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase : List[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE )[0].isupper() and not key(SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase : List[Any] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCAmelCase : Dict = ignore_underscore(SCREAMING_SNAKE_CASE )
return sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE ) + sorted(SCREAMING_SNAKE_CASE , key=SCREAMING_SNAKE_CASE )
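# ---------------------------------------------------------------------------
# Hedged illustration (added): the three-bucket ordering implemented above
# (constants first, classes second, functions last; sort keys ignore case and
# underscores), restated as a self-contained snippet since the scrambled
# identifiers above are not directly callable.
def _demo_sort(objects):
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    ignore = lambda o: o.lower().replace("_", "")
    return sorted(constants, key=ignore) + sorted(classes, key=ignore) + sorted(functions, key=ignore)


assert _demo_sort(["bert_fn", "BertModel", "BERT_CONST"]) == ["BERT_CONST", "BertModel", "bert_fn"]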
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
def _replace(SCREAMING_SNAKE_CASE : List[Any] ):
lowerCAmelCase : List[str] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase : Dict = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Any = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase : List[Any] = import_statement.split("\n" )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase : Tuple = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase : Optional[Any] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase : Optional[Any] = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase : List[str] = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase : Union[str, Any] = keys[:-1]
lowerCAmelCase : str = get_indent(lines[1] ) + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase : Any = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=True ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as f:
lowerCAmelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase : List[str] = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase : Tuple = main_blocks[block_idx]
lowerCAmelCase : Optional[Any] = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase : int = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase : Optional[Any] = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase : Dict = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase : Tuple = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase : Tuple = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase : int = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase : Union[str, Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase : Tuple = 0
lowerCAmelCase : Any = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase : List[Any] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write("\n".join(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : List[str]=True ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCAmelCase : Tuple = sort_imports(os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" ) , check_only=SCREAMING_SNAKE_CASE )
if result:
lowerCAmelCase : Optional[Any] = [os.path.join(SCREAMING_SNAKE_CASE , "__init__.py" )]
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE )} files, run `make style`.""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 681 | 1 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple=0 ):
'''simple docstring'''
if name is None:
lowerCAmelCase : Dict = None
else:
lowerCAmelCase : Any = "." * max(0 , spaces - 2 ) + "# {:" + str(5_0 - spaces ) + "s}"
lowerCAmelCase : Optional[int] = fmt.format(SCREAMING_SNAKE_CASE )
# Print and recurse (if needed).
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if msg is not None:
print(SCREAMING_SNAKE_CASE )
for k in val.keys():
recursive_print(SCREAMING_SNAKE_CASE , val[k] , spaces + 2 )
elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
print(SCREAMING_SNAKE_CASE , ":" , val.size() )
else:
print(SCREAMING_SNAKE_CASE , ":" , SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : Dict = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowerCAmelCase : str = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowerCAmelCase : Dict = param.view(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : Tuple = param.transpose(0 , 2 )
lowerCAmelCase : List[Any] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowerCAmelCase : Dict = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowerCAmelCase : Optional[Any] = param.view(*SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = param.transpose(0 , 1 ).contiguous()
lowerCAmelCase : str = param.view(*SCREAMING_SNAKE_CASE )
return param
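# ---------------------------------------------------------------------------
# Hedged shape illustration (added): a self-contained stand-in for the
# checkpoint_version >= 2.0 branch above, where the fused QKV parameter is
# stored as [num_heads * num_splits * hidden_size, :] and the heads/splits
# axes must be swapped. Names are invented for this demo only.
def _demo_qkv_reorder(param, num_splits, num_heads, hidden_size):
    input_shape = param.size()
    saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
    return param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)


_demo = torch.arange(2 * 3 * 4 * 5).float().view(2 * 3 * 4, 5)
assert _demo_qkv_reorder(_demo, num_splits=3, num_heads=2, hidden_size=4).shape == _demo.shape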
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : List[Any] = {}
# old versions did not store training args
lowerCAmelCase : Tuple = input_state_dict.get("args" , SCREAMING_SNAKE_CASE )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowerCAmelCase : Dict = ds_args.padded_vocab_size
lowerCAmelCase : Optional[Any] = ds_args.max_position_embeddings
lowerCAmelCase : Dict = ds_args.hidden_size
lowerCAmelCase : Any = ds_args.num_layers
lowerCAmelCase : str = ds_args.num_attention_heads
lowerCAmelCase : Dict = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowerCAmelCase : List[str] = config.n_head
# The hidden_size per head.
lowerCAmelCase : Optional[int] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowerCAmelCase : Any = input_state_dict["checkpoint_version"]
else:
lowerCAmelCase : Optional[int] = 0.0
# The model.
lowerCAmelCase : Optional[int] = input_state_dict["model"]
# The language model.
lowerCAmelCase : Optional[Any] = model["language_model"]
# The embeddings.
lowerCAmelCase : List[Any] = lm["embedding"]
# The word embeddings.
lowerCAmelCase : str = embeddings["word_embeddings"]["weight"]
# Truncate the embedding table to vocab_size rows.
lowerCAmelCase : List[str] = word_embeddings[: config.vocab_size, :]
lowerCAmelCase : List[str] = word_embeddings
# The position embeddings.
lowerCAmelCase : Any = embeddings["position_embeddings"]["weight"]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowerCAmelCase : str = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowerCAmelCase : Optional[int] = pos_embeddings
# The transformer.
lowerCAmelCase : Any = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
# The regex to extract layer names.
lowerCAmelCase : int = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)" )
# The simple map of names for "automated" rules.
lowerCAmelCase : str = {
"attention.dense": ".attn.c_proj.",
"self_attention.dense": ".attn.c_proj.",
"mlp.dense_h_to_4h": ".mlp.c_fc.",
"mlp.dense_4h_to_h": ".mlp.c_proj.",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowerCAmelCase : Any = layer_re.match(SCREAMING_SNAKE_CASE )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowerCAmelCase : List[str] = int(m.group(1 ) )
# The name of the operation.
lowerCAmelCase : Tuple = m.group(2 )
# Is it a weight or a bias?
lowerCAmelCase : int = m.group(3 )
# The name of the layer.
lowerCAmelCase : Optional[Any] = f"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("layernorm" ):
lowerCAmelCase : Any = "ln_1" if op_name.startswith("input" ) else "ln_2"
lowerCAmelCase : Union[str, Any] = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowerCAmelCase : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowerCAmelCase : Optional[int] = torch.tensor(-1E4 , dtype=torch.floataa )
lowerCAmelCase : Any = masked_bias
lowerCAmelCase : Optional[Any] = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 3 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowerCAmelCase : Union[str, Any] = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowerCAmelCase : Union[str, Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowerCAmelCase : str = fix_query_key_value_ordering(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 3 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Store. No change of shape.
lowerCAmelCase : Tuple = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
lowerCAmelCase : List[Any] = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowerCAmelCase : Union[str, Any] = megatron_to_transformers[op_name]
lowerCAmelCase : Optional[Any] = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowerCAmelCase : Union[str, Any] = transformer["final_layernorm.weight"]
lowerCAmelCase : List[Any] = transformer["final_layernorm.bias"]
# For the LM head, transformers wants the weights tied to the word embeddings.
lowerCAmelCase : Tuple = word_embeddings
# It should be done!
return output_state_dict
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument("--print-checkpoint-structure" , action="store_true" )
parser.add_argument(
"path_to_checkpoint" , type=SCREAMING_SNAKE_CASE , help="Path to the checkpoint file (.zip archive or direct .pt file)" , )
parser.add_argument(
"--config_file" , default="" , type=SCREAMING_SNAKE_CASE , help="An optional config json file describing the pre-trained model." , )
lowerCAmelCase : str = parser.parse_args()
# Extract the basename.
lowerCAmelCase : Dict = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(".zip" ):
with zipfile.ZipFile(args.path_to_checkpoint , "r" ) as checkpoint:
with checkpoint.open("release/mp_rank_00/model_optim_rng.pt" ) as pytorch_dict:
lowerCAmelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
else:
lowerCAmelCase : List[str] = torch.load(args.path_to_checkpoint , map_location="cpu" )
lowerCAmelCase : int = input_state_dict.get("args" , SCREAMING_SNAKE_CASE )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowerCAmelCase : Dict = "gelu_fast"
elif ds_args.openai_gelu:
lowerCAmelCase : Optional[Any] = "gelu_new"
else:
lowerCAmelCase : List[str] = "gelu"
else:
# in the very early days this used to be "gelu_new"
lowerCAmelCase : Tuple = "gelu_new"
# Spell out all parameters in case the defaults change.
lowerCAmelCase : Union[str, Any] = GPTaConfig(
vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=SCREAMING_SNAKE_CASE , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , summary_type="cls_index" , summary_use_proj=SCREAMING_SNAKE_CASE , summary_activation=SCREAMING_SNAKE_CASE , summary_proj_to_labels=SCREAMING_SNAKE_CASE , summary_first_dropout=0.1 , scale_attn_weights=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , )
else:
lowerCAmelCase : List[Any] = GPTaConfig.from_json_file(args.config_file )
lowerCAmelCase : Tuple = ["GPT2LMHeadModel"]
# Convert.
print("Converting" )
lowerCAmelCase : Optional[int] = convert_megatron_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906
if ds_args is not None:
lowerCAmelCase : List[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowerCAmelCase : Tuple = "gpt2"
elif tokenizer_type == "PretrainedFromHF":
lowerCAmelCase : Any = ds_args.tokenizer_name_or_path
else:
raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowerCAmelCase : Any = "gpt2"
lowerCAmelCase : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = type(SCREAMING_SNAKE_CASE ).__name__
lowerCAmelCase : Tuple = tokenizer_class
# Store the config to file.
print("Saving config" )
config.save_pretrained(SCREAMING_SNAKE_CASE )
# Save tokenizer based on args
print(f"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
# Store the state_dict to file.
lowerCAmelCase : List[str] = os.path.join(SCREAMING_SNAKE_CASE , "pytorch_model.bin" )
print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 681 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = parent
lowerCAmelCase : List[Any] = batch_size
lowerCAmelCase : Union[str, Any] = image_size
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : List[Any] = embeddings_size
lowerCAmelCase : List[Any] = hidden_sizes
lowerCAmelCase : Optional[int] = depths
lowerCAmelCase : str = is_training
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : List[Any] = hidden_act
lowerCAmelCase : Optional[Any] = num_labels
lowerCAmelCase : Tuple = scope
lowerCAmelCase : int = len(snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Optional[Any] = None
if self.use_labels:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Tuple = TFResNetModel(config=snake_case__ )
lowerCAmelCase : Union[str, Any] = model(snake_case__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : str = TFResNetForImageClassification(snake_case__ )
lowerCAmelCase : int = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = config_and_inputs
lowerCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
a : Any =(TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
a : Tuple =(
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
a : int =False
a : List[str] =False
a : Optional[int] =False
a : Union[str, Any] =False
a : Any =False
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = TFResNetModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def lowercase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : List[str] = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Dict = [*signature.parameters.keys()]
lowerCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : int = model_class(snake_case__ )
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
lowerCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase : Optional[Any] = layer_type
lowerCAmelCase : Dict = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : List[Any] = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def lowercase__ ( self ):
"""simple docstring"""
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = TFResNetModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase : Any = self.default_image_processor
lowerCAmelCase : Optional[Any] = prepare_img()
lowerCAmelCase : Dict = image_processor(images=snake_case__ , return_tensors="tf" )
# forward pass
lowerCAmelCase : str = model(**snake_case__ )
# verify the logits
lowerCAmelCase : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case__ )
lowerCAmelCase : str = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
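# ---------------------------------------------------------------------------
# Hedged inference sketch (added): the integration test above as a plain
# script. It assumes TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to
# the "microsoft/resnet-50" checkpoint; the image path is the fixture used
# elsewhere in this file.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet-1k classes
    print(int(tf.math.argmax(logits, axis=-1)[0]))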
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : int = None
def a__ ( ):
'''simple docstring'''
print("\n********Press N to stop entering at any point of time********\n" )
lowerCAmelCase : Dict = input("Enter the value of the root node: " ).strip().lower()
lowerCAmelCase : queue.Queue = queue.Queue()
lowerCAmelCase : int = TreeNode(int(SCREAMING_SNAKE_CASE ) )
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
lowerCAmelCase : Optional[int] = q.get()
lowerCAmelCase : List[Any] = f"""Enter the left node of {node_found.data}: """
lowerCAmelCase : Optional[Any] = input(SCREAMING_SNAKE_CASE ).strip().lower() or "n"
if check == "n":
return tree_node
lowerCAmelCase : List[Any] = TreeNode(int(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : int = left_node
q.put(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = f"""Enter the right node of {node_found.data}: """
lowerCAmelCase : List[Any] = input(SCREAMING_SNAKE_CASE ).strip().lower() or "n"
if check == "n":
return tree_node
lowerCAmelCase : str = TreeNode(int(SCREAMING_SNAKE_CASE ) )
lowerCAmelCase : List[str] = right_node
q.put(SCREAMING_SNAKE_CASE )
raise RuntimeError("build_tree should always return from inside the loop")
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
lowerCAmelCase : queue.Queue = queue.Queue()
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
lowerCAmelCase : List[str] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
lowerCAmelCase : queue.Queue = queue.Queue()
q.put(SCREAMING_SNAKE_CASE )
while not q.empty():
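        # process one full level per outer iteration, collecting children for the next level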
lowerCAmelCase : List[Any] = []
while not q.empty():
lowerCAmelCase : str = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(SCREAMING_SNAKE_CASE )
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
lowerCAmelCase : list[TreeNode] = []
lowerCAmelCase : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = n.left
# end of while means current node doesn't have left child
lowerCAmelCase : List[str] = stack.pop()
# start to traverse its right child
lowerCAmelCase : List[Any] = n.right
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
lowerCAmelCase : list[TreeNode] = []
lowerCAmelCase : Dict = node
while n or stack:
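        # descend the left spine first; a node is printed when it is popped back off the stack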
while n:
stack.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : int = n.left
lowerCAmelCase : Any = stack.pop()
print(n.data , end="," )
lowerCAmelCase : int = n.right
def a__ ( SCREAMING_SNAKE_CASE : TreeNode ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not node:
return
lowerCAmelCase , lowerCAmelCase : Optional[int] = [], []
lowerCAmelCase : Any = node
stacka.append(SCREAMING_SNAKE_CASE )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCAmelCase : Optional[int] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(SCREAMING_SNAKE_CASE )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def a__ ( SCREAMING_SNAKE_CASE : str = "" , SCREAMING_SNAKE_CASE : str=5_0 , SCREAMING_SNAKE_CASE : Tuple="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
lowerCAmelCase , lowerCAmelCase : int = divmod(width - len(SCREAMING_SNAKE_CASE ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
lowerCAmelCase__ = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 681 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1_0_0_0 ):
'''simple docstring'''
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCAmelCase : int = n - 1
lowerCAmelCase : Optional[int] = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for the modular exponentiation below
        exp += 1
    # n - 1 = d * 2**exp with d odd
lowerCAmelCase : Optional[Any] = 0
while count < prec:
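        # each round picks a random base and checks whether it witnesses that n is composite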
lowerCAmelCase : List[str] = random.randint(2 , n - 1 )
lowerCAmelCase : Tuple = bin_exp_mod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if b != 1:
lowerCAmelCase : List[str] = True
for _ in range(SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCAmelCase : List[str] = False
break
lowerCAmelCase : Optional[Any] = b * b
b %= n
if flag:
return False
count += 1
return True
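# Hedged usage sketch (assumes bin_exp_mod(base, exponent, modulus) performs modular exponentiation):
#   is_prime_big(97)   -> True
#   is_prime_big(100)  -> False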
if __name__ == "__main__":
lowerCAmelCase__ = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 681 | 1 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(map(SCREAMING_SNAKE_CASE , str(factorial(SCREAMING_SNAKE_CASE ) ) ) )
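# e.g. for an input of 10: 10! = 3628800, whose digit sum is 27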
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a : CommonSchedulerState
# setable values
a : jnp.ndarray
a : jnp.ndarray
a : Optional[int] =None
@classmethod
def lowercase__ ( cls , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ )
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : DDPMSchedulerState
class SCREAMING_SNAKE_CASE__ ( lowercase , lowercase ):
"""simple docstring"""
a : Union[str, Any] =[e.name for e in FlaxKarrasDiffusionSchedulers]
a : jnp.dtype
@property
def lowercase__ ( self ):
"""simple docstring"""
return True
@register_to_config
def __init__( self , snake_case__ = 1_000 , snake_case__ = 0.0001 , snake_case__ = 0.02 , snake_case__ = "linear" , snake_case__ = None , snake_case__ = "fixed_small" , snake_case__ = True , snake_case__ = "epsilon" , snake_case__ = jnp.floataa , ):
"""simple docstring"""
lowerCAmelCase : Any = dtype
def lowercase__ ( self , snake_case__ = None ):
"""simple docstring"""
if common is None:
lowerCAmelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCAmelCase : str = jnp.array(1.0 , dtype=self.dtype )
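        # timesteps run from num_train_timesteps - 1 down to 0 for the reverse (denoising) process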
lowerCAmelCase : Any = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = None ):
"""simple docstring"""
return sample
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ = () ):
"""simple docstring"""
lowerCAmelCase : List[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCAmelCase : Any = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=snake_case__ , timesteps=snake_case__ , )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = state.common.alphas_cumprod[t]
lowerCAmelCase : List[str] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCAmelCase : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCAmelCase : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCAmelCase : List[Any] = jnp.clip(snake_case__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCAmelCase : List[str] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCAmelCase : Optional[int] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCAmelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCAmelCase : List[str] = variance
lowerCAmelCase : Dict = state.common.betas[t]
lowerCAmelCase : Optional[Any] = (predicted_variance + 1) / 2
lowerCAmelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = True , ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = timestep
if key is None:
lowerCAmelCase : Tuple = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 )
else:
lowerCAmelCase : Tuple = None
# 1. compute alphas, betas
lowerCAmelCase : Optional[int] = state.common.alphas_cumprod[t]
lowerCAmelCase : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCAmelCase : Dict = 1 - alpha_prod_t
lowerCAmelCase : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCAmelCase : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCAmelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCAmelCase : Tuple = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCAmelCase : Optional[int] = jnp.clip(snake_case__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : List[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCAmelCase : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCAmelCase : Tuple = jax.random.split(snake_case__ , num=1 )
lowerCAmelCase : str = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
lowerCAmelCase : Union[str, Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
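        # the jnp.where above ensures no noise is added at the final step (t == 0)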
lowerCAmelCase : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
"""simple docstring"""
return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 681 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if n_term == "":
return []
lowerCAmelCase : list = []
for temp in range(int(SCREAMING_SNAKE_CASE ) ):
series.append(f"""1/{temp + 1}""" if series else "1" )
return series
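# e.g. an input of 5 yields ['1', '1/2', '1/3', '1/4', '1/5']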
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 681 |
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Tuple = OmegaConf.load(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
lowerCAmelCase : int = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = "first_stage_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Tuple = "model.diffusion_model."
for key in keys:
if key.startswith(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : str = state_dict[key]
lowerCAmelCase : List[str] = config.model.params.first_stage_config.params
lowerCAmelCase : List[Any] = config.model.params.unet_config.params
lowerCAmelCase : Union[str, Any] = VQModel(**SCREAMING_SNAKE_CASE ).eval()
vqvae.load_state_dict(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Union[str, Any] = UNetLDMModel(**SCREAMING_SNAKE_CASE ).eval()
unet.load_state_dict(SCREAMING_SNAKE_CASE )
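    # scheduler hyperparameters are read straight from the original LDM training config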
lowerCAmelCase : Optional[Any] = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=SCREAMING_SNAKE_CASE , )
lowerCAmelCase : Tuple = LDMPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipeline.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
lowerCAmelCase__ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 681 | 1 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( lowercase , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =XLMProphetNetTokenizer
a : List[Any] =False
a : Optional[Any] =True
def lowercase__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase : int = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = "[PAD]"
lowerCAmelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(snake_case__ ) , 1_012 )
def lowercase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Dict = XLMProphetNetTokenizer(snake_case__ , keep_accents=snake_case__ )
lowerCAmelCase : List[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(snake_case__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(snake_case__ )
self.assertListEqual(
snake_case__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
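        # the -9 entries are pieces missing from the fixture vocab; they decode to "[UNK]" below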
lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertListEqual(
snake_case__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def lowercase__ ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = "Hello World!"
lowerCAmelCase : Optional[Any] = [35_389, 6_672, 49, 2]
self.assertListEqual(snake_case__ , self.big_tokenizer.encode(snake_case__ ) )
@slow
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Any = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case__ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 681 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0_0 ):
'''simple docstring'''
return sum(e for e in range(3 , SCREAMING_SNAKE_CASE ) if e % 3 == 0 or e % 5 == 0 )
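# e.g. below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23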
if __name__ == "__main__":
print(F"{solution() = }")
| 681 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : int ="decision_transformer"
a : Optional[int] =["past_key_values"]
a : int ={
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case__=17 , snake_case__=4 , snake_case__=128 , snake_case__=4_096 , snake_case__=True , snake_case__=1 , snake_case__=1_024 , snake_case__=3 , snake_case__=1 , snake_case__=None , snake_case__="relu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__=True , snake_case__=True , snake_case__=50_256 , snake_case__=50_256 , snake_case__=False , snake_case__=False , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : str = state_dim
lowerCAmelCase : List[Any] = act_dim
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[Any] = max_ep_len
lowerCAmelCase : Union[str, Any] = action_tanh
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Dict = n_positions
lowerCAmelCase : List[str] = n_layer
lowerCAmelCase : Dict = n_head
lowerCAmelCase : Union[str, Any] = n_inner
lowerCAmelCase : Union[str, Any] = activation_function
lowerCAmelCase : List[Any] = resid_pdrop
lowerCAmelCase : Tuple = embd_pdrop
lowerCAmelCase : List[Any] = attn_pdrop
lowerCAmelCase : List[Any] = layer_norm_epsilon
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : str = scale_attn_weights
lowerCAmelCase : Any = use_cache
lowerCAmelCase : Union[str, Any] = scale_attn_by_inverse_layer_idx
lowerCAmelCase : Union[str, Any] = reorder_and_upcast_attn
lowerCAmelCase : Optional[Any] = bos_token_id
lowerCAmelCase : Optional[int] = eos_token_id
super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
| 681 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : List[Any] = MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE )
print(f"""Building PyTorch model from configuration: {config}""" )
lowerCAmelCase : Optional[Any] = MobileBertForPreTraining(SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
lowerCAmelCase : Any = load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 681 |
"""simple docstring"""
from math import factorial
def a__ ( SCREAMING_SNAKE_CASE : int = 1_0_0 ):
'''simple docstring'''
return sum(int(SCREAMING_SNAKE_CASE ) for x in str(factorial(SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 681 | 1 |