code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , ):
__snake_case : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
__snake_case : Any = 1 - (matter_density + radiation_density + dark_energy)
__snake_case : Dict = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__snake_case : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__magic_name__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 679 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 1 |
from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
__snake_case : Optional[int] = 0
__snake_case : List[Any] = len(__UpperCAmelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __UpperCAmelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
if len(__UpperCAmelCase ) <= 1:
return arr, 0
__snake_case : List[str] = len(__UpperCAmelCase ) // 2
__snake_case : Any = arr[0:mid]
__snake_case : Dict = arr[mid:]
__snake_case , __snake_case : Union[str, Any] = count_inversions_recursive(__UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = count_inversions_recursive(__UpperCAmelCase )
__snake_case , __snake_case : List[str] = _count_cross_inversions(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : str = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : List[Any] ):
__snake_case : Union[str, Any] = []
__snake_case : Optional[Any] = 0
while i < len(__UpperCAmelCase ) and j < len(__UpperCAmelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__UpperCAmelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__UpperCAmelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def UpperCAmelCase__( ):
__snake_case : Union[str, Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__snake_case : Dict = count_inversions_bf(__UpperCAmelCase )
__snake_case , __snake_case : List[Any] = count_inversions_recursive(__UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('number of inversions = ' , __UpperCAmelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__snake_case : Any = count_inversions_bf(__UpperCAmelCase )
__snake_case , __snake_case : int = count_inversions_recursive(__UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , __UpperCAmelCase )
# an empty list should also have zero inversions
__snake_case : List[Any] = []
__snake_case : str = count_inversions_bf(__UpperCAmelCase )
__snake_case , __snake_case : Optional[Any] = count_inversions_recursive(__UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('number of inversions = ' , __UpperCAmelCase )
if __name__ == "__main__":
main()
| 679 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "efficientformer"
def __init__( self , _UpperCAmelCase = [3, 2, 6, 4] , _UpperCAmelCase = [48, 96, 224, 448] , _UpperCAmelCase = [True, True, True, True] , _UpperCAmelCase = 448 , _UpperCAmelCase = 32 , _UpperCAmelCase = 4 , _UpperCAmelCase = 7 , _UpperCAmelCase = 5 , _UpperCAmelCase = 8 , _UpperCAmelCase = 4 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 16 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 3 , _UpperCAmelCase = 2 , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 1 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = 1E-5 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.02 , _UpperCAmelCase = 1E-12 , _UpperCAmelCase = 224 , _UpperCAmelCase = 1E-05 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Dict = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = hidden_sizes
__snake_case : Any = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Tuple = initializer_range
__snake_case : int = layer_norm_eps
__snake_case : int = patch_size
__snake_case : List[str] = num_channels
__snake_case : Optional[Any] = depths
__snake_case : Union[str, Any] = mlp_expansion_ratio
__snake_case : Dict = downsamples
__snake_case : Any = dim
__snake_case : str = key_dim
__snake_case : int = attention_ratio
__snake_case : Optional[Any] = resolution
__snake_case : str = pool_size
__snake_case : Dict = downsample_patch_size
__snake_case : List[Any] = downsample_stride
__snake_case : List[str] = downsample_pad
__snake_case : Dict = drop_path_rate
__snake_case : List[str] = num_metaad_blocks
__snake_case : Optional[Any] = distillation
__snake_case : Tuple = use_layer_scale
__snake_case : Tuple = layer_scale_init_value
__snake_case : Dict = image_size
__snake_case : str = batch_norm_eps
| 679 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__magic_name__ = parser.parse_args()
if args.model_type == "bert":
__magic_name__ = BertForMaskedLM.from_pretrained(args.model_name)
__magic_name__ = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
__magic_name__ = model.state_dict()
__magic_name__ = {}
for w in ["word_embeddings", "position_embeddings"]:
__magic_name__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
__magic_name__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
__magic_name__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
__magic_name__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
__magic_name__ = state_dict['''cls.predictions.decoder.weight''']
__magic_name__ = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
__magic_name__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
__magic_name__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 679 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 1 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : list[float] ):
if len(__UpperCAmelCase ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
__snake_case : Any = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__snake_case : Tuple = parent
__snake_case : int = batch_size
__snake_case : Union[str, Any] = seq_length
__snake_case : Any = is_training
__snake_case : Tuple = use_input_mask
__snake_case : List[Any] = use_token_type_ids
__snake_case : Union[str, Any] = use_labels
__snake_case : int = vocab_size
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : List[Any] = intermediate_size
__snake_case : int = hidden_act
__snake_case : List[str] = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : int = max_position_embeddings
__snake_case : Optional[Any] = type_vocab_size
__snake_case : Dict = type_sequence_label_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = num_labels
__snake_case : Tuple = num_choices
__snake_case : Optional[int] = scope
__snake_case : Tuple = vocab_size - 1
def lowercase_ ( self ):
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : int = None
if self.use_input_mask:
__snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : List[str] = None
if self.use_labels:
__snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : List[str] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase_ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = self.prepare_config_and_inputs()
__snake_case : List[str] = True
return config, input_ids, input_mask, token_labels
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = GPTNeoXModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
__snake_case : List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = True
__snake_case : int = GPTNeoXModel(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Dict = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = self.num_labels
__snake_case : Tuple = GPTNeoXForQuestionAnswering(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = self.num_labels
__snake_case : Optional[int] = GPTNeoXForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = self.num_labels
__snake_case : Optional[int] = GPTNeoXForTokenClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[int] = True
__snake_case : List[Any] = GPTNeoXForCausalLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# first forward pass
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case : int = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__snake_case : str = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
__snake_case : Optional[int] = output_from_no_past['hidden_states'][0]
__snake_case : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , )['hidden_states'][0]
# select random slice
__snake_case : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : str = config_and_inputs
__snake_case : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Tuple = GPTNeoXModelTester(self )
__snake_case : List[str] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# This regression test was failing with PyTorch < 1.3
__snake_case , __snake_case , __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case : Optional[Any] = None
self.model_tester.create_and_check_model_as_decoder(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@unittest.skip(reason='Feed forward chunking is not implemented' )
def lowercase_ ( self ):
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Optional[Any] = ids_tensor([1, 10] , config.vocab_size )
__snake_case : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Tuple = GPTNeoXModel(_UpperCAmelCase )
original_model.to(_UpperCAmelCase )
original_model.eval()
__snake_case : Optional[Any] = original_model(_UpperCAmelCase ).last_hidden_state
__snake_case : List[str] = original_model(_UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case : Any = {'type': scaling_type, 'factor': 10.0}
__snake_case : Optional[Any] = GPTNeoXModel(_UpperCAmelCase )
scaled_model.to(_UpperCAmelCase )
scaled_model.eval()
__snake_case : Tuple = scaled_model(_UpperCAmelCase ).last_hidden_state
__snake_case : Dict = scaled_model(_UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase_ ( self ):
__snake_case : Any = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
for checkpointing in [True, False]:
__snake_case : Union[str, Any] = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(_UpperCAmelCase )
__snake_case : Tuple = tokenizer('My favorite food is' , return_tensors='pt' ).to(_UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__snake_case : Tuple = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
__snake_case : Optional[Any] = model.generate(**_UpperCAmelCase , do_sample=_UpperCAmelCase , max_new_tokens=20 )
__snake_case : str = tokenizer.batch_decode(_UpperCAmelCase )[0]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 679 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__snake_case : Any = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = 'sshleifer/tiny-gpt2'
__snake_case : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Any = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = 'sgugger/tiny-distilbert-classification'
__snake_case : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Dict = 'sshleifer/tiny-gpt2'
__snake_case : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Dict = 'sshleifer/tiny-gpt2'
__snake_case : Any = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Dict = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
__snake_case : int = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Optional[Any] = 'sshleifer/tiny-gpt2'
__snake_case : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
__snake_case : str = 'sshleifer/tiny-gpt2'
__snake_case : Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Union[str, Any] = TensorFlowBenchmark(_UpperCAmelCase , [config] )
__snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowercase_ ( self ):
__snake_case : Optional[int] = 'patrickvonplaten/t5-tiny-random'
__snake_case : int = AutoConfig.from_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase , configs=[config] )
__snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
__snake_case : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Optional[int] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowercase_ ( self ):
__snake_case : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_UpperCAmelCase , 'env.csv' ) , multi_process=_UpperCAmelCase , )
__snake_case : str = TensorFlowBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'env.csv' ) ).exists() )
def lowercase_ ( self ):
__snake_case : int = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_UpperCAmelCase ):
self.assertTrue(hasattr(_UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__snake_case : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , 'log.txt' ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , eager_mode=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
__snake_case : Optional[Any] = TensorFlowBenchmark(_UpperCAmelCase )
__snake_case : List[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , 'log.txt' ) ).exists() )
| 679 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_autoformer''': [
'''AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AutoformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AutoformerForPrediction''',
'''AutoformerModel''',
'''AutoformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
__UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
__UpperCAmelCase = "image_qa"
__UpperCAmelCase = AutoProcessor
__UpperCAmelCase = AutoModelForVisualQuestionAnswering
__UpperCAmelCase = ["image", "text"]
__UpperCAmelCase = ["text"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['vision'] )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
def lowercase_ ( self , _UpperCAmelCase ):
with torch.no_grad():
return self.model(**_UpperCAmelCase ).logits
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 679 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCAmelCase__( __UpperCAmelCase : Dict ): # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCAmelCase__( ):
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case : Optional[Any] = [1, 2, 3]
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=2 )
with pytest.raises(__UpperCAmelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
__snake_case : Optional[int] = [1, 2]
__snake_case : List[Any] = {'a': 1, 'b': 2}
__snake_case : Optional[int] = {'a': [1, 2], 'b': [3, 4]}
__snake_case : List[str] = {'a': {'1': 1}, 'b': 2}
__snake_case : str = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
__snake_case : List[Any] = [2, 3]
__snake_case : Optional[Any] = {'a': 2, 'b': 3}
__snake_case : str = {'a': [2, 3], 'b': [4, 5]}
__snake_case : Any = {'a': {'1': 2}, 'b': 3}
__snake_case : List[str] = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(__UpperCAmelCase , __UpperCAmelCase , num_proc=__UpperCAmelCase ) == expected_map_nested_sa
| 679 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , ):
if attention_mask is None:
__snake_case : Optional[Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__snake_case : Tuple = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__snake_case : Optional[Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=__UpperCAmelCase )
if decoder_head_mask is None:
__snake_case : List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
if cross_attn_head_mask is None:
__snake_case : int = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=__UpperCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="relu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ):
__snake_case : Union[str, Any] = parent
__snake_case : str = batch_size
__snake_case : str = seq_length
__snake_case : Any = is_training
__snake_case : str = use_labels
__snake_case : Tuple = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : Tuple = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : int = hidden_act
__snake_case : List[Any] = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Dict = encoder_layerdrop
__snake_case : int = decoder_layerdrop
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = eos_token_id
__snake_case : Tuple = pad_token_id
__snake_case : Optional[int] = bos_token_id
def lowercase_ ( self ):
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[Any] = self.eos_token_id # Eos Token
__snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
__snake_case : str = input_ids.clamp(self.pad_token_id + 1 )
__snake_case : Any = decoder_input_ids.clamp(self.pad_token_id + 1 )
__snake_case : int = self.get_config()
__snake_case : Dict = prepare_mam_aaa_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowercase_ ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowercase_ ( self ):
__snake_case , __snake_case : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = MaMaaaModel(config=_UpperCAmelCase ).get_decoder().to(_UpperCAmelCase ).eval()
__snake_case : str = inputs_dict['input_ids']
__snake_case : Union[str, Any] = inputs_dict['attention_mask']
__snake_case : Any = inputs_dict['head_mask']
# first forward pass
__snake_case : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
__snake_case , __snake_case : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__snake_case : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__snake_case : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__snake_case : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )['last_hidden_state']
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[
'last_hidden_state'
]
# select random slice
__snake_case : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = MaMaaaModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval()
__snake_case : str = model(**_UpperCAmelCase )
__snake_case : Dict = outputs.encoder_last_hidden_state
__snake_case : str = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[Any] = model.get_encoder()
encoder.save_pretrained(_UpperCAmelCase )
__snake_case : int = MaMaaaEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = model.get_decoder()
decoder.save_pretrained(_UpperCAmelCase )
__snake_case : Tuple = MaMaaaDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case : List[Any] = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowercase_ ( self ):
__snake_case : Union[str, Any] = MaMaaaModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
__snake_case : Dict = model_class(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase )
__snake_case , __snake_case : List[Any] = model_class.from_pretrained(_UpperCAmelCase , output_loading_info=_UpperCAmelCase )
self.assertEqual(info['missing_keys'] , [] )
def lowercase_ ( self ):
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
__snake_case : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Optional[int] = copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
if not self.is_encoder_decoder:
__snake_case : Optional[int] = inputs['input_ids']
del inputs["input_ids"]
else:
__snake_case : int = inputs['input_ids']
__snake_case : List[str] = inputs.get('decoder_input_ids' , _UpperCAmelCase )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , _UpperCAmelCase )
__snake_case : Optional[int] = model.get_input_embeddings()
if not self.is_encoder_decoder:
__snake_case : Tuple = wte(_UpperCAmelCase )
else:
__snake_case : List[Any] = wte(_UpperCAmelCase )
__snake_case : Optional[Any] = wte(_UpperCAmelCase )
with torch.no_grad():
model(**_UpperCAmelCase )[0]
def lowercase_ ( self ):
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
__snake_case : Any = input_dict['input_ids']
__snake_case : List[Any] = input_ids.ne(1 ).to(_UpperCAmelCase )
__snake_case : Optional[int] = MaMaaaForConditionalGeneration(_UpperCAmelCase ).eval().to(_UpperCAmelCase )
if torch_device == "cuda":
model.half()
model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
model.generate(num_beams=4 , do_sample=_UpperCAmelCase , early_stopping=_UpperCAmelCase , num_return_sequences=3 )
def UpperCAmelCase__( __UpperCAmelCase : str ):
return torch.tensor(__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase )
__magic_name__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def lowercase_ ( self ):
__snake_case : int = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
__snake_case : Optional[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__snake_case : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__snake_case : int = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )[0]
__snake_case : Tuple = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
__snake_case : Optional[int] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase_ ( self ):
__snake_case : List[str] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
# change to intended input
__snake_case : Union[str, Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
__snake_case : str = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
__snake_case : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
with torch.no_grad():
__snake_case : List[str] = model(**_UpperCAmelCase )[0]
__snake_case : Optional[int] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
__snake_case : Optional[Any] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_UpperCAmelCase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase_ ( self ):
__snake_case : Any = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
__snake_case : Optional[Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
__snake_case : int = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' )
__snake_case : str = model.generate(
input_ids=dct['input_ids'].to(_UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(_UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
__snake_case : Tuple = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
__snake_case : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
assert generated == expected_en
| 679 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] ):
def update_area_of_max_square(__UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case : Dict = update_area_of_max_square(__UpperCAmelCase , col + 1 )
__snake_case : Any = update_area_of_max_square(row + 1 , col + 1 )
__snake_case : Any = update_area_of_max_square(row + 1 , __UpperCAmelCase )
if mat[row][col]:
__snake_case : Any = 1 + min([right, diagonal, down] )
__snake_case : Union[str, Any] = max(largest_square_area[0] , __UpperCAmelCase )
return sub_problem_sol
else:
return 0
__snake_case : Optional[int] = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
__UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case : Any = update_area_of_max_square_using_dp_array(__UpperCAmelCase , col + 1 , __UpperCAmelCase )
__snake_case : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , __UpperCAmelCase )
__snake_case : List[Any] = update_area_of_max_square_using_dp_array(row + 1 , __UpperCAmelCase , __UpperCAmelCase )
if mat[row][col]:
__snake_case : Dict = 1 + min([right, diagonal, down] )
__snake_case : str = max(largest_square_area[0] , __UpperCAmelCase )
__snake_case : Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case : Tuple = [0]
__snake_case : Dict = [[-1] * cols for _ in range(__UpperCAmelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , __UpperCAmelCase )
return largest_square_area[0]
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] ):
__snake_case : List[str] = [[0] * (cols + 1) for _ in range(rows + 1 )]
__snake_case : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case : str = dp_array[row][col + 1]
__snake_case : Dict = dp_array[row + 1][col + 1]
__snake_case : str = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case : str = 1 + min(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__snake_case : List[str] = max(dp_array[row][col] , __UpperCAmelCase )
else:
__snake_case : List[Any] = 0
return largest_square_area
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] ):
__snake_case : Dict = [0] * (cols + 1)
__snake_case : List[Any] = [0] * (cols + 1)
__snake_case : Union[str, Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__snake_case : List[str] = current_row[col + 1]
__snake_case : int = next_row[col + 1]
__snake_case : List[str] = next_row[col]
if mat[row][col] == 1:
__snake_case : Optional[int] = 1 + min(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__snake_case : Any = max(current_row[col] , __UpperCAmelCase )
else:
__snake_case : Any = 0
__snake_case : str = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 679 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : int = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['stage2', 'stage3', 'stage4'] , )
__snake_case : Tuple = DetaConfig(
backbone_config=__UpperCAmelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=__UpperCAmelCase , with_box_refine=__UpperCAmelCase , two_stage=__UpperCAmelCase , )
# set labels
__snake_case : int = 'huggingface/label-files'
if "o365" in model_name:
__snake_case : List[str] = 3_66
__snake_case : str = 'object365-id2label.json'
else:
__snake_case : str = 91
__snake_case : Optional[int] = 'coco-detection-id2label.json'
__snake_case : Union[str, Any] = num_labels
__snake_case : Any = json.load(open(cached_download(hf_hub_url(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) ) , 'r' ) )
__snake_case : Union[str, Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : List[str] = idalabel
__snake_case : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : List[str] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.patch_embed.proj.weight', 'model.backbone.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.0.body.patch_embed.proj.bias', 'model.backbone.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.0.body.patch_embed.norm.weight', 'model.backbone.model.embeddings.norm.weight') )
rename_keys.append(('backbone.0.body.patch_embed.norm.bias', 'model.backbone.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(('backbone.0.body.norm1.weight', 'model.backbone.model.hidden_states_norms.stage2.weight') )
rename_keys.append(('backbone.0.body.norm1.bias', 'model.backbone.model.hidden_states_norms.stage2.bias') )
rename_keys.append(('backbone.0.body.norm2.weight', 'model.backbone.model.hidden_states_norms.stage3.weight') )
rename_keys.append(('backbone.0.body.norm2.bias', 'model.backbone.model.hidden_states_norms.stage3.bias') )
rename_keys.append(('backbone.0.body.norm3.weight', 'model.backbone.model.hidden_states_norms.stage4.weight') )
rename_keys.append(('backbone.0.body.norm3.bias', 'model.backbone.model.hidden_states_norms.stage4.bias') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict ):
__snake_case : str = dct.pop(__UpperCAmelCase )
__snake_case : List[str] = val
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict ):
__snake_case : Dict = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__snake_case : str = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__snake_case : List[Any] = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__snake_case : Any = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : str = in_proj_weight[:dim, :]
__snake_case : Any = in_proj_bias[: dim]
__snake_case : List[str] = in_proj_weight[
dim : dim * 2, :
]
__snake_case : str = in_proj_bias[
dim : dim * 2
]
__snake_case : str = in_proj_weight[
-dim :, :
]
__snake_case : Any = in_proj_bias[-dim :]
# fmt: on
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] ):
# transformer decoder self-attention layers
__snake_case : Dict = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__snake_case : Optional[Any] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__snake_case : List[Any] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__snake_case : Union[str, Any] = in_proj_weight[:hidden_size, :]
__snake_case : Any = in_proj_bias[:hidden_size]
__snake_case : Tuple = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__snake_case : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2]
__snake_case : Any = in_proj_weight[-hidden_size:, :]
__snake_case : Tuple = in_proj_bias[-hidden_size:]
def UpperCAmelCase__( ):
__snake_case : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : str = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] ):
__snake_case : Any = get_deta_config(__UpperCAmelCase )
# load original state dict
if model_name == "deta-swin-large":
__snake_case : Optional[Any] = hf_hub_download(repo_id='nielsr/deta-checkpoints' , filename='adet_swin_ft.pth' )
elif model_name == "deta-swin-large-o365":
__snake_case : Any = hf_hub_download(repo_id='jozhang97/deta-swin-l-o365' , filename='deta_swin_pt_o365.pth' )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
__snake_case : Tuple = torch.load(__UpperCAmelCase , map_location='cpu' )['model']
# original state dict
for name, param in state_dict.items():
print(__UpperCAmelCase , param.shape )
# rename keys
__snake_case : List[str] = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_swin_q_k_v(__UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__snake_case : List[str] = state_dict.pop(__UpperCAmelCase )
__snake_case : str = val
if "input_proj" in key:
__snake_case : Union[str, Any] = state_dict.pop(__UpperCAmelCase )
__snake_case : Dict = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__snake_case : int = state_dict.pop(__UpperCAmelCase )
__snake_case : Dict = val
# finally, create HuggingFace model and load state dict
__snake_case : Dict = DetaForObjectDetection(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
__snake_case : Tuple = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(__UpperCAmelCase )
# load image processor
__snake_case : int = DetaImageProcessor(format='coco_detection' )
# verify our conversion on image
__snake_case : int = prepare_img()
__snake_case : Any = processor(images=__UpperCAmelCase , return_tensors='pt' )
__snake_case : Dict = encoding['pixel_values']
__snake_case : Union[str, Any] = model(pixel_values.to(__UpperCAmelCase ) )
# verify logits
print('Logits:' , outputs.logits[0, :3, :3] )
print('Boxes:' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__snake_case : List[Any] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
__snake_case : Union[str, Any] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
__snake_case : Dict = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
__snake_case : int = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__UpperCAmelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__UpperCAmelCase ) , atol=1E-4 )
print('Everything ok!' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
# Push to hub
if push_to_hub:
print('Pushing model and processor to hub...' )
model.push_to_hub(F"""jozhang97/{model_name}""" )
processor.push_to_hub(F"""jozhang97/{model_name}""" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__magic_name__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 679 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : int = 10_00 ):
__snake_case : int = 2**power
__snake_case : Any = str(__UpperCAmelCase )
__snake_case : Optional[Any] = list(__UpperCAmelCase )
__snake_case : List[Any] = 0
for i in list_num:
sum_of_num += int(__UpperCAmelCase )
return sum_of_num
if __name__ == "__main__":
__magic_name__ = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
__magic_name__ = solution(power)
print('''Sum of the digits is: ''', result)
| 679 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 1 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__magic_name__ = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Any = ['layers', 'blocks']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__ = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
__snake_case : Optional[int] = list(s_dict.keys() )
for key in keys:
__snake_case : int = key
for k, v in WHISPER_MAPPING.items():
if k in key:
__snake_case : List[str] = new_key.replace(__UpperCAmelCase , __UpperCAmelCase )
print(F"""{key} -> {new_key}""" )
__snake_case : Any = s_dict.pop(__UpperCAmelCase )
return s_dict
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
__snake_case , __snake_case : str = emb.weight.shape
__snake_case : List[Any] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase , bias=__UpperCAmelCase )
__snake_case : Union[str, Any] = emb.weight.data
return lin_layer
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__snake_case : Dict = os.path.basename(__UpperCAmelCase )
__snake_case : Any = url.split('/' )[-2]
__snake_case : str = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if os.path.exists(__UpperCAmelCase ) and not os.path.isfile(__UpperCAmelCase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(__UpperCAmelCase ):
__snake_case : Tuple = open(__UpperCAmelCase , 'rb' ).read()
if hashlib.shaaaa(__UpperCAmelCase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(__UpperCAmelCase ) as source, open(__UpperCAmelCase , 'wb' ) as output:
with tqdm(
total=int(source.info().get('Content-Length' ) ) , ncols=80 , unit='iB' , unit_scale=__UpperCAmelCase , unit_divisor=10_24 ) as loop:
while True:
__snake_case : Dict = source.read(81_92 )
if not buffer:
break
output.write(__UpperCAmelCase )
loop.update(len(__UpperCAmelCase ) )
__snake_case : Optional[int] = open(__UpperCAmelCase , 'rb' ).read()
if hashlib.shaaaa(__UpperCAmelCase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' )
return model_bytes
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Tuple ):
if ".pt" not in checkpoint_path:
__snake_case : Tuple = _download(_MODELS[checkpoint_path] )
else:
__snake_case : Tuple = torch.load(__UpperCAmelCase , map_location='cpu' )
__snake_case : Optional[int] = original_checkpoint['dims']
__snake_case : Dict = original_checkpoint['model_state_dict']
__snake_case : List[Any] = state_dict['decoder.token_embedding.weight']
remove_ignore_keys_(__UpperCAmelCase )
rename_keys(__UpperCAmelCase )
__snake_case : Tuple = True
__snake_case : str = state_dict['decoder.layers.0.fc1.weight'].shape[0]
__snake_case : Tuple = WhisperConfig(
vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=__UpperCAmelCase , decoder_ffn_dim=__UpperCAmelCase , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , )
__snake_case : Dict = WhisperForConditionalGeneration(__UpperCAmelCase )
__snake_case , __snake_case : Optional[Any] = model.model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0 and not set(__UpperCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
__snake_case : int = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
__snake_case : List[str] = proj_out_weights
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__magic_name__ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 679 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int ):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__snake_case : Union[str, Any] = flax_key_tuple[:-1] + ('weight',)
__snake_case : Any = torch.permute(__UpperCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCAmelCase ):
# linear layer
__snake_case : List[Any] = flax_key_tuple[:-1] + ('weight',)
__snake_case : Optional[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__snake_case : int = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] ):
if "metadata" in layer:
__snake_case : Tuple = layer.split('metadata' )
__snake_case : Any = ''.join(split_layer[0] )[:-1]
__snake_case : Union[str, Any] = [tuple(('metadata' + split_layer[1]).split('/' ) )]
elif "kvstore" in layer:
__snake_case : Any = layer.split('kvstore' )
__snake_case : List[str] = ''.join(split_layer[0] )[:-1]
__snake_case : Tuple = [tuple(('kvstore' + split_layer[1]).split('/' ) )]
else:
__snake_case : Dict = layer.split('/' )
__snake_case : Any = '/'.join(split_layer[:-1] )
__snake_case : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
__snake_case : Optional[Any] = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
__snake_case : int = 'file'
else:
__snake_case : Union[str, Any] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : int ):
__snake_case : Optional[Any] = rename_keys(__UpperCAmelCase )
__snake_case : List[Any] = {}
for k, v in current_block.items():
__snake_case : Any = v
__snake_case : Optional[Any] = new_current_block
torch.save(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = WEIGHTS_NAME ):
__snake_case : int = convert_file_size_to_int(__UpperCAmelCase )
__snake_case : Tuple = []
__snake_case : Any = {}
__snake_case : int = 0
__snake_case : Optional[int] = 0
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp:
__snake_case : Dict = serialization.msgpack_restore(fp.read() )['optimizer']['target']
__snake_case : int = flatten_dict(__UpperCAmelCase , sep='/' )
__snake_case : List[Any] = {}
for layer in checkpoint_info.keys():
__snake_case , __snake_case , __snake_case : Any = get_key_and_tensorstore_dict(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if curr_real_layer_name in all_layers:
__snake_case : Union[str, Any] = content
else:
__snake_case : List[Any] = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__snake_case : List[str] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__snake_case : List[str] = torch.tensor(__UpperCAmelCase )
__snake_case : Dict = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__snake_case , __snake_case : List[Any] = rename_base_flax_keys(tuple(key.split('/' ) ) , __UpperCAmelCase )
__snake_case : int = '/'.join(__UpperCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__snake_case : List[str] = os.path.join(
__UpperCAmelCase , weights_name.replace('.bin' , F"""-{len(__UpperCAmelCase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__UpperCAmelCase , __UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
__snake_case : int = {}
__snake_case : Optional[Any] = 0
__snake_case : Optional[Any] = raw_weights.to(getattr(__UpperCAmelCase , __UpperCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__snake_case : Optional[Any] = os.path.join(__UpperCAmelCase , weights_name.replace('.bin' , F"""-{len(__UpperCAmelCase )+1:05d}-of-???.bin""" ) )
rename_and_save_block(__UpperCAmelCase , __UpperCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__UpperCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__snake_case : Tuple = {}
__snake_case : Union[str, Any] = {}
for idx, shard in enumerate(__UpperCAmelCase ):
__snake_case : List[Any] = weights_name.replace(
'.bin' , F"""-{idx+1:05d}-of-{len(__UpperCAmelCase ):05d}.bin""" ) # len(sharded_state_dicts):05d}
__snake_case : List[str] = os.path.join(__UpperCAmelCase , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
__snake_case : List[Any] = shard
for key in shard:
__snake_case : Union[str, Any] = shard_file
# Add the metadata
__snake_case : Optional[Any] = {'total_size': total_size}
__snake_case : int = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , 'w' , encoding='utf-8' ) as f:
__snake_case : Tuple = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + '\n'
f.write(__UpperCAmelCase )
return metadata, index
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
__magic_name__ = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def UpperCAmelCase__( ):
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__snake_case : int = SwitchTransformersConfig.from_pretrained('google/switch-base-8' )
config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' )
__snake_case : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
'/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' )
__snake_case : Optional[int] = TaTokenizer.from_pretrained('t5-small' )
__snake_case : Union[str, Any] = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
__snake_case : str = tokenizer(__UpperCAmelCase , return_tensors='pt' ).input_ids
__snake_case : List[str] = model.generate(__UpperCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 679 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__magic_name__ = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
__magic_name__ = {
'''169M''': 768,
'''430M''': 1_024,
'''1B5''': 2_048,
'''3B''': 2_560,
'''7B''': 4_096,
'''14B''': 5_120,
}
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Dict = list(state_dict.keys() )
for name in state_dict_keys:
__snake_case : Tuple = state_dict.pop(__UpperCAmelCase )
# emb -> embedding
if name.startswith('emb.' ):
__snake_case : Optional[int] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__snake_case : List[str] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__snake_case : Optional[Any] = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , __UpperCAmelCase )
# ffn -> feed_forward
__snake_case : int = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , __UpperCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__snake_case : Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__snake_case : List[Any] = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
__snake_case : List[str] = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__snake_case : str = 'rwkv.' + name
__snake_case : Any = weight
return state_dict
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : str=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__snake_case : List[Any] = 5_02_77
__snake_case : Tuple = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__snake_case : Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=__UpperCAmelCase )
__snake_case : Optional[int] = len(__UpperCAmelCase )
tokenizer.save_pretrained(__UpperCAmelCase )
# 2. Build the config
__snake_case : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__snake_case : int = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
__snake_case : Tuple = RwkvConfig(
vocab_size=__UpperCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__UpperCAmelCase )
# 3. Download model file then convert state_dict
__snake_case : Optional[int] = hf_hub_download(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Dict = torch.load(__UpperCAmelCase , map_location='cpu' )
__snake_case : List[str] = convert_state_dict(__UpperCAmelCase )
# 4. Split in shards and save
__snake_case , __snake_case : Optional[Any] = shard_checkpoint(__UpperCAmelCase )
for shard_file, shard in shards.items():
torch.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
if index is not None:
__snake_case : Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
# Save the index as well
with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
__snake_case : Optional[int] = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + '\n'
f.write(__UpperCAmelCase )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' )
__snake_case : str = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__snake_case : str = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__snake_case : Union[str, Any] = AutoModelForCausalLM.from_pretrained(__UpperCAmelCase )
model.push_to_hub(__UpperCAmelCase , max_shard_size='2GB' )
tokenizer.push_to_hub(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
__magic_name__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 679 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : List[Any]=False ):
__snake_case : List[Any] = OmegaConf.load(__UpperCAmelCase )
if display:
print(yaml.dump(OmegaConf.to_container(__UpperCAmelCase ) ) )
return config
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict=None ):
if conf_path is None:
__snake_case : Optional[int] = './model_checkpoints/vqgan_only.yaml'
__snake_case : List[Any] = load_config(__UpperCAmelCase , display=__UpperCAmelCase )
__snake_case : Tuple = VQModel(**config.model.params )
if ckpt_path is None:
__snake_case : Dict = './model_checkpoints/vqgan_only.pt'
__snake_case : Optional[Any] = torch.load(__UpperCAmelCase , map_location=__UpperCAmelCase )
if ".ckpt" in ckpt_path:
__snake_case : Tuple = sd['state_dict']
model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
model.to(__UpperCAmelCase )
del sd
return model
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ):
__snake_case , __snake_case , __snake_case : Optional[Any] = model.encode(__UpperCAmelCase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
__snake_case : List[Any] = model.decode(__UpperCAmelCase )
return xrec
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int]=False ):
__snake_case , __snake_case : List[str] = string.rsplit('.' , 1 )
if reload:
__snake_case : Dict = importlib.import_module(__UpperCAmelCase )
importlib.reload(__UpperCAmelCase )
return getattr(importlib.import_module(__UpperCAmelCase , package=__UpperCAmelCase ) , cls )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if "target" not in config:
raise KeyError('Expected key `target` to instantiate.' )
return get_obj_from_str(config['target'] )(**config.get('params' , {} ) )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Any=True ):
__snake_case : Union[str, Any] = instantiate_from_config(__UpperCAmelCase )
if sd is not None:
model.load_state_dict(__UpperCAmelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ):
# load the specified checkpoint
if ckpt:
__snake_case : List[str] = torch.load(__UpperCAmelCase , map_location='cpu' )
__snake_case : Optional[Any] = pl_sd['global_step']
print(F"""loaded model from global step {global_step}.""" )
else:
__snake_case : str = {'state_dict': None}
__snake_case : List[Any] = None
__snake_case : str = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=__UpperCAmelCase , eval_mode=__UpperCAmelCase )['model']
return model, global_step
| 679 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ = {
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
__magic_name__ = {'''mobilebert-uncased''': 512}
__magic_name__ = {}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = MobileBertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCAmelCase ) != tokenize_chinese_chars
):
__snake_case : Union[str, Any] = getattr(_UpperCAmelCase , normalizer_state.pop('type' ) )
__snake_case : Tuple = do_lower_case
__snake_case : int = strip_accents
__snake_case : Optional[Any] = tokenize_chinese_chars
__snake_case : List[str] = normalizer_class(**_UpperCAmelCase )
__snake_case : Union[str, Any] = do_lower_case
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Dict = [self.sep_token_id]
__snake_case : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : List[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 679 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "char"
__UpperCAmelCase = "bpe"
__UpperCAmelCase = "wp"
__magic_name__ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["image_processor", "char_tokenizer"]
__UpperCAmelCase = "ViTImageProcessor"
__UpperCAmelCase = "MgpstrTokenizer"
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
__snake_case : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
__snake_case : str = kwargs.pop('feature_extractor' )
__snake_case : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
__snake_case : List[str] = tokenizer
__snake_case : Tuple = AutoTokenizer.from_pretrained('gpt2' )
__snake_case : Tuple = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__snake_case : Optional[int] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None:
__snake_case : Union[str, Any] = self.char_tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__snake_case : Union[str, Any] = encodings['input_ids']
return inputs
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case , __snake_case , __snake_case : int = sequences
__snake_case : Optional[int] = char_preds.size(0 )
__snake_case , __snake_case : Tuple = self._decode_helper(_UpperCAmelCase , 'char' )
__snake_case , __snake_case : Tuple = self._decode_helper(_UpperCAmelCase , 'bpe' )
__snake_case , __snake_case : Union[str, Any] = self._decode_helper(_UpperCAmelCase , 'wp' )
__snake_case : List[Any] = []
__snake_case : str = []
for i in range(_UpperCAmelCase ):
__snake_case : str = [char_scores[i], bpe_scores[i], wp_scores[i]]
__snake_case : List[str] = [char_strs[i], bpe_strs[i], wp_strs[i]]
__snake_case : Optional[int] = scores.index(max(_UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__snake_case : Optional[int] = {}
__snake_case : List[str] = final_strs
__snake_case : Any = final_scores
__snake_case : Optional[int] = char_strs
__snake_case : str = bpe_strs
__snake_case : str = wp_strs
return out
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
if format == DecodeType.CHARACTER:
__snake_case : Optional[int] = self.char_decode
__snake_case : str = 1
__snake_case : str = '[s]'
elif format == DecodeType.BPE:
__snake_case : Optional[Any] = self.bpe_decode
__snake_case : Optional[Any] = 2
__snake_case : Optional[int] = '#'
elif format == DecodeType.WORDPIECE:
__snake_case : str = self.wp_decode
__snake_case : str = 102
__snake_case : Tuple = '[SEP]'
else:
raise ValueError(F"""Format {format} is not supported.""" )
__snake_case , __snake_case : Union[str, Any] = [], []
__snake_case : Optional[Any] = pred_logits.size(0 )
__snake_case : Optional[int] = pred_logits.size(1 )
__snake_case , __snake_case : str = pred_logits.topk(1 , dim=-1 , largest=_UpperCAmelCase , sorted=_UpperCAmelCase )
__snake_case : Union[str, Any] = preds_index.view(-1 , _UpperCAmelCase )[:, 1:]
__snake_case : Tuple = decoder(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = torch.nn.functional.softmax(_UpperCAmelCase , dim=2 ).max(dim=2 )
__snake_case : Optional[Any] = preds_max_prob[:, 1:]
for index in range(_UpperCAmelCase ):
__snake_case : Union[str, Any] = preds_str[index].find(_UpperCAmelCase )
__snake_case : Optional[int] = preds_str[index][:pred_eos]
__snake_case : str = preds_index[index].cpu().tolist()
__snake_case : Dict = pred_index.index(_UpperCAmelCase ) if eos_token in pred_index else -1
__snake_case : str = preds_max_prob[index][: pred_eos_index + 1]
__snake_case : Optional[int] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_UpperCAmelCase )
conf_scores.append(_UpperCAmelCase )
return dec_strs, conf_scores
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(_UpperCAmelCase )]
return decode_strs
def lowercase_ ( self , _UpperCAmelCase ):
return self.bpe_tokenizer.batch_decode(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(_UpperCAmelCase )]
return decode_strs
| 679 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "EncodecFeatureExtractor"
__UpperCAmelCase = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = self.feature_extractor
__snake_case : Optional[Any] = False
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True ):
return self.tokenizer.get_decoder_prompt_ids(task=_UpperCAmelCase , language=_UpperCAmelCase , no_timestamps=_UpperCAmelCase )
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : str = kwargs.pop('audio' , _UpperCAmelCase )
__snake_case : Dict = kwargs.pop('sampling_rate' , _UpperCAmelCase )
__snake_case : Dict = kwargs.pop('text' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
__snake_case : str = args[0]
__snake_case : int = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
__snake_case : Any = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
if audio is not None:
__snake_case : List[str] = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__snake_case : int = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
__snake_case : Union[str, Any] = audio_inputs['padding_mask']
return inputs
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Tuple = kwargs.pop('audio' , _UpperCAmelCase )
__snake_case : Optional[int] = kwargs.pop('padding_mask' , _UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
__snake_case : Optional[Any] = args[0]
__snake_case : List[Any] = args[1:]
if audio_values is not None:
return self._decode_audio(_UpperCAmelCase , padding_mask=_UpperCAmelCase )
else:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Optional[int] = to_numpy(_UpperCAmelCase )
__snake_case , __snake_case , __snake_case : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(_UpperCAmelCase )
__snake_case : int = to_numpy(_UpperCAmelCase )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__snake_case : Optional[Any] = seq_len - padding_mask.shape[-1]
__snake_case : Optional[int] = 1 - self.feature_extractor.padding_value
__snake_case : List[Any] = np.pad(_UpperCAmelCase , ((0, 0), (0, difference)) , 'constant' , constant_values=_UpperCAmelCase )
__snake_case : Tuple = audio_values.tolist()
for i in range(_UpperCAmelCase ):
__snake_case : int = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__snake_case : List[str] = sliced_audio.reshape(_UpperCAmelCase , -1 )
return audio_values
| 679 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 1 |
from importlib import import_module
from .logging import get_logger
__magic_name__ = get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Tuple = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__' ):
setattr(self , _UpperCAmelCase , getattr(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Dict = module._original_module if isinstance(_UpperCAmelCase , _PatchedModuleObj ) else module
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : Union[str, Any] = obj
__snake_case : List[str] = target
__snake_case : Tuple = new
__snake_case : Any = target.split('.' )[0]
__snake_case : Dict = {}
__snake_case : str = attrs or []
def __enter__( self ):
*__snake_case , __snake_case : Tuple = self.target.split('.' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Tuple = import_module('.'.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__snake_case : Optional[int] = getattr(self.obj , _UpperCAmelCase )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(_UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__snake_case : Union[str, Any] = obj_attr
# patch at top level
setattr(self.obj , _UpperCAmelCase , _PatchedModuleObj(_UpperCAmelCase , attrs=self.attrs ) )
__snake_case : str = getattr(self.obj , _UpperCAmelCase )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(_UpperCAmelCase , _UpperCAmelCase , _PatchedModuleObj(getattr(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) , attrs=self.attrs ) )
__snake_case : Any = getattr(_UpperCAmelCase , _UpperCAmelCase )
# finally set the target attribute
setattr(_UpperCAmelCase , _UpperCAmelCase , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
__snake_case : Tuple = getattr(import_module('.'.join(_UpperCAmelCase ) ) , _UpperCAmelCase )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , _UpperCAmelCase ) is attr_value:
__snake_case : List[Any] = getattr(self.obj , _UpperCAmelCase )
setattr(self.obj , _UpperCAmelCase , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
__snake_case : Optional[Any] = globals()['__builtins__'][target_attr]
setattr(self.obj , _UpperCAmelCase , self.new )
else:
raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self , *_UpperCAmelCase ):
for attr in list(self.original ):
setattr(self.obj , _UpperCAmelCase , self.original.pop(_UpperCAmelCase ) )
def lowercase_ ( self ):
self.__enter__()
self._active_patches.append(self )
def lowercase_ ( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 679 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 1 |
from __future__ import annotations
__magic_name__ = 1.6_021e-19 # units = C
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
import os
from distutils.util import strtobool
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ):
for e in env_keys:
__snake_case : Optional[int] = int(os.environ.get(__UpperCAmelCase , -1 ) )
if val >= 0:
return val
return default
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any=False ):
__snake_case : Tuple = os.environ.get(__UpperCAmelCase , str(__UpperCAmelCase ) )
return strtobool(__UpperCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]="no" ):
__snake_case : int = os.environ.get(__UpperCAmelCase , str(__UpperCAmelCase ) )
return value
| 679 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Optional[int] = prime_factors(__UpperCAmelCase )
if is_square_free(__UpperCAmelCase ):
return -1 if len(__UpperCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , ):
__snake_case : Dict = parent
__snake_case : Any = batch_size
__snake_case : Optional[int] = image_size
__snake_case : List[Any] = patch_size
__snake_case : str = num_channels
__snake_case : Tuple = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Optional[int] = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Tuple = intermediate_size
__snake_case : str = hidden_act
__snake_case : Dict = hidden_dropout_prob
__snake_case : str = attention_probs_dropout_prob
__snake_case : List[str] = type_sequence_label_size
__snake_case : Any = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__snake_case : Optional[int] = (image_size // patch_size) ** 2
__snake_case : List[Any] = num_patches + 1
def lowercase_ ( self ):
__snake_case : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Dict = FlaxViTModel(config=_UpperCAmelCase )
__snake_case : List[str] = model(_UpperCAmelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case : str = (self.image_size, self.image_size)
__snake_case : Dict = (self.patch_size, self.patch_size)
__snake_case : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = self.type_sequence_label_size
__snake_case : List[str] = FlaxViTForImageClassification(config=_UpperCAmelCase )
__snake_case : Any = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case : Union[str, Any] = 1
__snake_case : Optional[int] = FlaxViTForImageClassification(_UpperCAmelCase )
__snake_case : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case : Union[str, Any] = model(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) ,
) : Any = config_and_inputs
__snake_case : Tuple = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase_ ( self ):
__snake_case : Tuple = FlaxViTModelTester(self )
__snake_case : Any = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(_UpperCAmelCase )
__snake_case : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Dict = [*signature.parameters.keys()]
__snake_case : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : List[str] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(_UpperCAmelCase , **_UpperCAmelCase ):
return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
__snake_case : Optional[int] = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__snake_case : Dict = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ):
for model_class_name in self.all_model_classes:
__snake_case : int = model_class_name.from_pretrained('google/vit-base-patch16-224' )
__snake_case : Any = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_UpperCAmelCase )
| 679 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 1 |
__magic_name__ = '''
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__magic_name__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__magic_name__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 679 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 1 |
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 679 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : list ):
def merge(__UpperCAmelCase : list , __UpperCAmelCase : list ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__UpperCAmelCase ) <= 1:
return collection
__snake_case : Optional[int] = len(__UpperCAmelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = input('''Enter numbers separated by a comma:\n''').strip()
__magic_name__ = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
| 679 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
import doctest
from collections import deque
import numpy as np
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self ):
__snake_case : List[Any] = [2, 1, 2, -1]
__snake_case : List[Any] = [1, 2, 3, 4]
def lowercase_ ( self ):
__snake_case : str = len(self.first_signal )
__snake_case : int = len(self.second_signal )
__snake_case : Dict = max(_UpperCAmelCase , _UpperCAmelCase )
# create a zero matrix of max_length x max_length
__snake_case : Dict = [[0] * max_length for i in range(_UpperCAmelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_UpperCAmelCase ):
__snake_case : List[str] = deque(self.second_signal )
rotated_signal.rotate(_UpperCAmelCase )
for j, item in enumerate(_UpperCAmelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
__snake_case : List[Any] = np.matmul(np.transpose(_UpperCAmelCase ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_UpperCAmelCase , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 679 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
__magic_name__ = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
__magic_name__ = {
'''RUCAIBox/mvp''': 1_024,
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = MvpTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
__snake_case : Optional[int] = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) )
__snake_case : List[str] = add_prefix_space
__snake_case : Optional[Any] = pre_tok_class(**_UpperCAmelCase )
__snake_case : int = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case : Optional[int] = 'post_processor'
__snake_case : str = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
if tokenizer_component_instance:
__snake_case : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case : Optional[int] = tuple(state['sep'] )
if "cls" in state:
__snake_case : Optional[int] = tuple(state['cls'] )
__snake_case : int = False
if state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space:
__snake_case : List[Any] = add_prefix_space
__snake_case : List[str] = True
if state.get('trim_offsets' , _UpperCAmelCase ) != trim_offsets:
__snake_case : Dict = trim_offsets
__snake_case : Tuple = True
if changes_to_apply:
__snake_case : Union[str, Any] = getattr(_UpperCAmelCase , state.pop('type' ) )
__snake_case : str = component_class(**_UpperCAmelCase )
setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase )
@property
def lowercase_ ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value
__snake_case : Optional[int] = value
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : str = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
__snake_case : Union[str, Any] = kwargs.get('is_split_into_words' , _UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Union[str, Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__snake_case : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : Optional[Any] = [self.sep_token_id]
__snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 679 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 1 |
import enum
import shutil
import sys
__magic_name__ , __magic_name__ = shutil.get_terminal_size()
__magic_name__ = {'''UP''': '''A''', '''DOWN''': '''B''', '''RIGHT''': '''C''', '''LEFT''': '''D'''}
class __SCREAMING_SNAKE_CASE ( enum.Enum):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]="" ):
sys.stdout.write(str(__UpperCAmelCase ) + end )
sys.stdout.flush()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]="" ):
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , __UpperCAmelCase )
def UpperCAmelCase__( ):
forceWrite('\r' )
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : str ):
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def UpperCAmelCase__( ):
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__( ):
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
| 679 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["image_processor", "tokenizer"]
__UpperCAmelCase = "BlipImageProcessor"
__UpperCAmelCase = "AutoTokenizer"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = False
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Tuple = self.image_processor
def __call__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = None , **_UpperCAmelCase , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
__snake_case : Union[str, Any] = self.tokenizer
__snake_case : Optional[int] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
return text_encoding
# add pixel_values
__snake_case : Optional[Any] = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase )
if text is not None:
__snake_case : int = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
else:
__snake_case : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCAmelCase )
return encoding_image_processor
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.tokenizer.model_input_names
__snake_case : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 679 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__snake_case : Dict = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase , cache_dir=_UpperCAmelCase )
__snake_case : List[str] = [t[-1] for t in os.walk(os.path.join(_UpperCAmelCase , os.listdir(_UpperCAmelCase )[0] , 'snapshots' ) )]
__snake_case : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_UpperCAmelCase )
__snake_case : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : List[Any] = 4
__snake_case : List[Any] = jax.device_count()
__snake_case : Union[str, Any] = num_samples * [prompt]
__snake_case : Tuple = pipeline.prepare_inputs(_UpperCAmelCase )
# shard inputs and rng
__snake_case : str = replicate(_UpperCAmelCase )
__snake_case : Any = jax.random.split(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[int] = shard(_UpperCAmelCase )
__snake_case : str = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1E-3
assert np.abs(np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
__snake_case : Dict = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_UpperCAmelCase ) == num_samples
def lowercase_ ( self ):
__snake_case , __snake_case : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_UpperCAmelCase )
__snake_case : Any = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : str = jax.random.PRNGKey(0 )
__snake_case : Dict = 50
__snake_case : Dict = jax.device_count()
__snake_case : Any = num_samples * [prompt]
__snake_case : List[str] = pipeline.prepare_inputs(_UpperCAmelCase )
# shard inputs and rng
__snake_case : int = replicate(_UpperCAmelCase )
__snake_case : Union[str, Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = shard(_UpperCAmelCase )
__snake_case : Optional[int] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1E-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def lowercase_ ( self ):
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase )
__snake_case : Optional[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : Any = jax.random.PRNGKey(0 )
__snake_case : str = 50
__snake_case : int = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase )
# shard inputs and rng
__snake_case : List[Any] = replicate(_UpperCAmelCase )
__snake_case : Optional[Any] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = shard(_UpperCAmelCase )
__snake_case : Tuple = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase_ ( self ):
__snake_case , __snake_case : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
__snake_case : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : int = jax.random.PRNGKey(0 )
__snake_case : Optional[int] = 50
__snake_case : int = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Optional[int] = pipeline.prepare_inputs(_UpperCAmelCase )
# shard inputs and rng
__snake_case : Dict = replicate(_UpperCAmelCase )
__snake_case : Optional[int] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = shard(_UpperCAmelCase )
__snake_case : Any = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1E-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def lowercase_ ( self ):
__snake_case : Any = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , set_alpha_to_one=_UpperCAmelCase , steps_offset=1 , )
__snake_case , __snake_case : Any = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
__snake_case : Tuple = scheduler.create_state()
__snake_case : Optional[int] = scheduler_state
__snake_case : List[str] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : str = 50
__snake_case : Any = jax.device_count()
__snake_case : int = num_samples * [prompt]
__snake_case : List[str] = pipeline.prepare_inputs(_UpperCAmelCase )
# shard inputs and rng
__snake_case : Any = replicate(_UpperCAmelCase )
__snake_case : List[str] = jax.random.split(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = shard(_UpperCAmelCase )
__snake_case : Optional[int] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1E-3
assert np.abs((np.abs(_UpperCAmelCase , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def lowercase_ ( self ):
__snake_case : List[Any] = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
__snake_case : List[Any] = jax.device_count()
__snake_case : Tuple = num_samples * [prompt]
__snake_case : str = jax.random.split(jax.random.PRNGKey(0 ) , _UpperCAmelCase )
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , )
__snake_case : Optional[Any] = replicate(_UpperCAmelCase )
__snake_case : Tuple = pipeline.prepare_inputs(_UpperCAmelCase )
__snake_case : Optional[int] = shard(_UpperCAmelCase )
__snake_case : Any = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__snake_case : Tuple = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__snake_case , __snake_case : str = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_UpperCAmelCase , use_memory_efficient_attention=_UpperCAmelCase , )
__snake_case : Union[str, Any] = replicate(_UpperCAmelCase )
__snake_case : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase )
__snake_case : Tuple = shard(_UpperCAmelCase )
__snake_case : List[str] = pipeline(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__snake_case : Any = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 679 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 1 |
from ..utils import DummyObject, requires_backends
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class __SCREAMING_SNAKE_CASE ( metaclass=UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["torch", "transformers", "onnx"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def lowercase_ ( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 679 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 1 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any]=0.999 , __UpperCAmelCase : str="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(__UpperCAmelCase : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__UpperCAmelCase : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__snake_case : Optional[Any] = []
for i in range(__UpperCAmelCase ):
__snake_case : str = i / num_diffusion_timesteps
__snake_case : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase = 2
@register_to_config
def __init__( self , _UpperCAmelCase = 1_000 , _UpperCAmelCase = 0.00085 , _UpperCAmelCase = 0.012 , _UpperCAmelCase = "linear" , _UpperCAmelCase = None , _UpperCAmelCase = "epsilon" , _UpperCAmelCase = "linspace" , _UpperCAmelCase = 0 , ):
if trained_betas is not None:
__snake_case : Union[str, Any] = torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__snake_case : int = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case : Tuple = betas_for_alpha_bar(_UpperCAmelCase )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
__snake_case : str = 1.0 - self.betas
__snake_case : str = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if schedule_timesteps is None:
__snake_case : str = self.timesteps
__snake_case : Union[str, Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__snake_case : int = 1 if len(_UpperCAmelCase ) > 1 else 0
else:
__snake_case : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep
__snake_case : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowercase_ ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , ):
__snake_case : Optional[int] = self.index_for_timestep(_UpperCAmelCase )
if self.state_in_first_order:
__snake_case : Optional[int] = self.sigmas[step_index]
else:
__snake_case : str = self.sigmas_interpol[step_index]
__snake_case : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
__snake_case : Dict = num_inference_steps
__snake_case : str = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__snake_case : int = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__snake_case : List[str] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case : List[str] = (np.arange(0 , _UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__snake_case : int = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case : Dict = (np.arange(_UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(_UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__snake_case : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__snake_case : Union[str, Any] = torch.from_numpy(np.log(_UpperCAmelCase ) ).to(_UpperCAmelCase )
__snake_case : List[str] = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase ) ) , _UpperCAmelCase )
__snake_case : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__snake_case : List[Any] = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase )
# interpolate sigmas
__snake_case : Any = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__snake_case : str = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__snake_case : Dict = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_UpperCAmelCase ).startswith('mps' ):
# mps does not support float64
__snake_case : Optional[Any] = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=torch.floataa )
else:
__snake_case : List[str] = torch.from_numpy(_UpperCAmelCase ).to(_UpperCAmelCase )
# interpolate timesteps
__snake_case : List[str] = self.sigma_to_t(_UpperCAmelCase ).to(_UpperCAmelCase , dtype=timesteps.dtype )
__snake_case : Dict = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__snake_case : Optional[int] = torch.cat([timesteps[:1], interleaved_timesteps] )
__snake_case : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__snake_case : Union[str, Any] = defaultdict(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
# get log sigma
__snake_case : Any = sigma.log()
# get distribution
__snake_case : Union[str, Any] = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__snake_case : List[str] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__snake_case : Dict = low_idx + 1
__snake_case : Tuple = self.log_sigmas[low_idx]
__snake_case : List[str] = self.log_sigmas[high_idx]
# interpolate sigmas
__snake_case : Dict = (low - log_sigma) / (low - high)
__snake_case : Optional[Any] = w.clamp(0 , 1 )
# transform interpolation to time range
__snake_case : Union[str, Any] = (1 - w) * low_idx + w * high_idx
__snake_case : Any = t.view(sigma.shape )
return t
@property
def lowercase_ ( self ):
return self.sample is None
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = True , ):
__snake_case : List[Any] = self.index_for_timestep(_UpperCAmelCase )
# advance index counter by 1
__snake_case : Optional[int] = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__snake_case : List[Any] = self.sigmas[step_index]
__snake_case : Optional[int] = self.sigmas_interpol[step_index + 1]
__snake_case : Optional[int] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__snake_case : List[str] = self.sigmas[step_index - 1]
__snake_case : Union[str, Any] = self.sigmas_interpol[step_index]
__snake_case : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__snake_case : str = 0
__snake_case : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__snake_case : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case : List[Any] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__snake_case : str = sigma_hat if self.state_in_first_order else sigma_interpol
__snake_case : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__snake_case : Union[str, Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__snake_case : Any = sigma_interpol - sigma_hat
# store for 2nd order step
__snake_case : Union[str, Any] = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__snake_case : Optional[int] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__snake_case : Optional[Any] = sigma_next - sigma_hat
__snake_case : Optional[Any] = self.sample
__snake_case : Optional[int] = None
__snake_case : Tuple = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__snake_case : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase ):
# mps does not support float64
__snake_case : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__snake_case : Optional[int] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__snake_case : str = self.timesteps.to(original_samples.device )
__snake_case : Any = timesteps.to(original_samples.device )
__snake_case : Union[str, Any] = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase ) for t in timesteps]
__snake_case : Tuple = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__snake_case : Tuple = sigma.unsqueeze(-1 )
__snake_case : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 679 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
__magic_name__ = '''Usage of script: script_name <size_of_canvas:int>'''
__magic_name__ = [0] * 100 + [1] * 10
random.shuffle(choice)
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Tuple = [[False for i in range(__UpperCAmelCase )] for j in range(__UpperCAmelCase )]
return canvas
def UpperCAmelCase__( __UpperCAmelCase : list[list[bool]] ):
for i, row in enumerate(__UpperCAmelCase ):
for j, _ in enumerate(__UpperCAmelCase ):
__snake_case : int = bool(random.getrandbits(1 ) )
def UpperCAmelCase__( __UpperCAmelCase : list[list[bool]] ):
__snake_case : Any = np.array(__UpperCAmelCase )
__snake_case : Any = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__UpperCAmelCase ):
for c, pt in enumerate(__UpperCAmelCase ):
__snake_case : Any = __judge_point(
__UpperCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__snake_case : Any = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__snake_case : list[list[bool]] = current_canvas.tolist()
return return_canvas
def UpperCAmelCase__( __UpperCAmelCase : bool , __UpperCAmelCase : list[list[bool]] ):
__snake_case : Optional[Any] = 0
__snake_case : List[Any] = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
__snake_case : List[Any] = pt
if pt:
if alive < 2:
__snake_case : List[Any] = False
elif alive == 2 or alive == 3:
__snake_case : List[str] = True
elif alive > 3:
__snake_case : int = False
else:
if alive == 3:
__snake_case : Dict = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__magic_name__ = int(sys.argv[1])
# main working structure of this module.
__magic_name__ = create_canvas(canvas_size)
seed(c)
__magic_name__ , __magic_name__ = plt.subplots()
fig.show()
__magic_name__ = ListedColormap(['''w''', '''k'''])
try:
while True:
__magic_name__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 679 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ):
try:
with open(__UpperCAmelCase , 'rb' ) as flax_state_f:
__snake_case : int = from_bytes(__UpperCAmelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(__UpperCAmelCase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
__snake_case : Tuple = flatten_dict(jax.tree_util.tree_map(lambda __UpperCAmelCase : x.dtype == jnp.bfloataa , __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__snake_case : Union[str, Any] = jax.tree_util.tree_map(
lambda __UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __UpperCAmelCase )
__snake_case : Optional[int] = ''
__snake_case : str = flatten_dict(__UpperCAmelCase , sep='.' )
__snake_case : Any = pt_model.state_dict()
# keep track of unexpected & missing keys
__snake_case : Tuple = []
__snake_case : Dict = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__snake_case : List[str] = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__snake_case : int = flax_key_tuple_array[:-1] + ['weight']
__snake_case : Union[str, Any] = jnp.transpose(__UpperCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__snake_case : str = flax_key_tuple_array[:-1] + ['weight']
__snake_case : List[Any] = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__snake_case : Dict = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(__UpperCAmelCase ):
__snake_case : str = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
__snake_case : Optional[Any] = '.'.join(__UpperCAmelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
__snake_case : List[Any] = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , np.ndarray ) else flax_tensor
__snake_case : Optional[Any] = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
__snake_case : Tuple = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(__UpperCAmelCase ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
return pt_model
| 679 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__magic_name__ = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''ASTFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 10_00 ):
__snake_case : List[Any] = 1
__snake_case : Any = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__snake_case : list[int] = []
__snake_case : List[Any] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__snake_case : Optional[int] = len(__UpperCAmelCase )
__snake_case : List[str] = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__snake_case : Union[str, Any] = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 1 |
def UpperCAmelCase__( ):
__snake_case : int = []
__snake_case : str = 1
while len(__UpperCAmelCase ) < 1E6:
constant.append(str(__UpperCAmelCase ) )
i += 1
__snake_case : str = ''.join(__UpperCAmelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
| 679 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 1 |
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ):
# Initialise PyTorch model
__snake_case : List[str] = LxmertConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : Optional[int] = LxmertForPreTraining(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 1 |
import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = LayoutLMTokenizer
__UpperCAmelCase = LayoutLMTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
__snake_case : Dict = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase_ ( self , **_UpperCAmelCase ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[str] = 'UNwant\u00E9d,running'
__snake_case : Tuple = 'unwanted, running'
return input_text, output_text
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.tokenizer_class(self.vocab_file )
__snake_case : Union[str, Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def lowercase_ ( self ):
pass
| 679 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
import numpy
# List of input, output pairs
__magic_name__ = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__magic_name__ = (((515, 22, 13), 555), ((61, 35, 49), 150))
__magic_name__ = [2, 4, 1, 5]
__magic_name__ = len(train_data)
__magic_name__ = 0.009
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any]="train" ):
return calculate_hypothesis_value(__UpperCAmelCase , __UpperCAmelCase ) - output(
__UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
__snake_case : Optional[int] = 0
for i in range(len(__UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any]=m ):
__snake_case : Optional[Any] = 0
for i in range(__UpperCAmelCase ):
if index == -1:
summation_value += _error(__UpperCAmelCase )
else:
summation_value += _error(__UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
__snake_case : List[str] = summation_of_cost_derivative(__UpperCAmelCase , __UpperCAmelCase ) / m
return cost_derivative_value
def UpperCAmelCase__( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
__snake_case : Tuple = 0.000002
__snake_case : str = 0
__snake_case : List[str] = 0
while True:
j += 1
__snake_case : List[str] = [0, 0, 0, 0]
for i in range(0 , len(__UpperCAmelCase ) ):
__snake_case : Optional[int] = get_cost_derivative(i - 1 )
__snake_case : Any = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
__UpperCAmelCase , __UpperCAmelCase , atol=__UpperCAmelCase , rtol=__UpperCAmelCase , ):
break
__snake_case : Tuple = temp_parameter_vector
print(('Number of iterations:', j) )
def UpperCAmelCase__( ):
for i in range(len(__UpperCAmelCase ) ):
print(('Actual output value:', output(__UpperCAmelCase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(__UpperCAmelCase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 679 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "donut-swin"
__UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=4 , _UpperCAmelCase=3 , _UpperCAmelCase=96 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[3, 6, 12, 24] , _UpperCAmelCase=7 , _UpperCAmelCase=4.0 , _UpperCAmelCase=True , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase="gelu" , _UpperCAmelCase=False , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : List[Any] = image_size
__snake_case : Optional[Any] = patch_size
__snake_case : Tuple = num_channels
__snake_case : Dict = embed_dim
__snake_case : Tuple = depths
__snake_case : int = len(_UpperCAmelCase )
__snake_case : List[Any] = num_heads
__snake_case : Optional[Any] = window_size
__snake_case : Dict = mlp_ratio
__snake_case : Optional[Any] = qkv_bias
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : int = drop_path_rate
__snake_case : Tuple = hidden_act
__snake_case : Optional[int] = use_absolute_embeddings
__snake_case : Tuple = layer_norm_eps
__snake_case : int = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case : Any = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
| 679 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 1 |
from __future__ import annotations
import pandas as pd
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
__snake_case : Dict = [0] * no_of_processes
__snake_case : Any = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(__UpperCAmelCase ):
__snake_case : Dict = burst_time[i]
__snake_case : int = 0
__snake_case : Tuple = 0
__snake_case : Tuple = 9_99_99_99_99
__snake_case : List[str] = 0
__snake_case : Tuple = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(__UpperCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__snake_case : List[str] = remaining_time[j]
__snake_case : str = j
__snake_case : List[str] = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__snake_case : Tuple = remaining_time[short]
if minm == 0:
__snake_case : Union[str, Any] = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__snake_case : Tuple = False
# Find finish time of current process
__snake_case : List[Any] = increment_time + 1
# Calculate waiting time
__snake_case : str = finish_time - arrival_time[short]
__snake_case : List[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
__snake_case : Any = 0
# Increment time
increment_time += 1
return waiting_time
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[int] ):
__snake_case : Optional[Any] = [0] * no_of_processes
for i in range(__UpperCAmelCase ):
__snake_case : Optional[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def UpperCAmelCase__( __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int ):
__snake_case : List[Any] = 0
__snake_case : List[Any] = 0
for i in range(__UpperCAmelCase ):
__snake_case : Dict = total_waiting_time + waiting_time[i]
__snake_case : Any = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
__magic_name__ = int(input())
__magic_name__ = [0] * no_of_processes
__magic_name__ = [0] * no_of_processes
__magic_name__ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
__magic_name__ , __magic_name__ = map(int, input().split())
__magic_name__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__magic_name__ = burst_time
__magic_name__ = no_of_processes
__magic_name__ = waiting_time
__magic_name__ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__magic_name__ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 679 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 1 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = XLMProphetNetTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Optional[Any] = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = '[PAD]'
__snake_case : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCAmelCase ) , 1_012 )
def lowercase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def lowercase_ ( self ):
__snake_case : Any = XLMProphetNetTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__snake_case : Tuple = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__snake_case : List[str] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def lowercase_ ( self ):
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def lowercase_ ( self ):
__snake_case : Optional[Any] = 'Hello World!'
__snake_case : Any = [35_389, 6_672, 49, 2]
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowercase_ ( self ):
# fmt: off
__snake_case : List[Any] = {'input_ids': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
| 679 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''T5Config'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "mt5"
__UpperCAmelCase = MTaConfig
| 679 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
__magic_name__ = '''src/transformers'''
# Matches is_xxx_available()
__magic_name__ = re.compile(r'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__magic_name__ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__magic_name__ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__magic_name__ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__magic_name__ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__magic_name__ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__magic_name__ = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__magic_name__ = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__magic_name__ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__magic_name__ = re.compile(r'''^\s*try:''')
# Catches a line with else:
__magic_name__ = re.compile(r'''^\s*else:''')
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if _re_test_backend.search(__UpperCAmelCase ) is None:
return None
__snake_case : Optional[int] = [b[0] for b in _re_backend.findall(__UpperCAmelCase )]
backends.sort()
return "_and_".join(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
with open(__UpperCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__snake_case : List[Any] = f.readlines()
__snake_case : Any = 0
while line_index < len(__UpperCAmelCase ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__UpperCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
__snake_case : Any = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__snake_case : str = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__UpperCAmelCase ):
__snake_case : List[str] = _re_one_line_import_struct.search(__UpperCAmelCase ).groups()[0]
__snake_case : List[Any] = re.findall('\[([^\]]+)\]' , __UpperCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__snake_case : Union[str, Any] = _re_import_struct_key_value.search(__UpperCAmelCase )
if single_line_import_search is not None:
__snake_case : List[str] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__snake_case : Optional[int] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__snake_case : List[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__snake_case : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__snake_case : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__snake_case : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(__UpperCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__UpperCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__UpperCAmelCase ) is not None:
__snake_case : int = _re_import_struct_add_many.search(__UpperCAmelCase ).groups()[0].split(', ' )
__snake_case : Union[str, Any] = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_between_brackets.search(__UpperCAmelCase ) is not None:
__snake_case : Any = _re_between_brackets.search(__UpperCAmelCase ).groups()[0].split(', ' )
__snake_case : str = [obj[1:-1] for obj in imports if len(__UpperCAmelCase ) > 0]
objects.extend(__UpperCAmelCase )
elif _re_quote_object.search(__UpperCAmelCase ) is not None:
objects.append(_re_quote_object.search(__UpperCAmelCase ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__snake_case : int = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__snake_case : Optional[Any] = []
while (
line_index < len(__UpperCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__snake_case : Optional[Any] = lines[line_index]
__snake_case : Optional[int] = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__snake_case : str = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(__UpperCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
__snake_case : Optional[Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__snake_case : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__snake_case : str = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__snake_case : List[str] = lines[line_index]
__snake_case : Optional[int] = _re_import.search(__UpperCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__snake_case : Dict = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
def find_duplicates(__UpperCAmelCase : Union[str, Any] ):
return [k for k, v in collections.Counter(__UpperCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__snake_case : Union[str, Any] = []
for key in import_dict_objects.keys():
__snake_case : Dict = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__snake_case : Union[str, Any] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__snake_case : Dict = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def UpperCAmelCase__( ):
__snake_case : Optional[int] = []
for root, _, files in os.walk(__UpperCAmelCase ):
if "__init__.py" in files:
__snake_case : List[Any] = os.path.join(__UpperCAmelCase , '__init__.py' )
__snake_case : Any = parse_init(__UpperCAmelCase )
if objects is not None:
__snake_case : int = analyze_results(*__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
__snake_case : List[Any] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > 0:
raise ValueError('\n\n'.join(__UpperCAmelCase ) )
def UpperCAmelCase__( ):
__snake_case : str = []
for path, directories, files in os.walk(__UpperCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(__UpperCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__UpperCAmelCase ) / folder).glob('*.py' ) ) ) == 0:
continue
__snake_case : Dict = str((Path(__UpperCAmelCase ) / folder).relative_to(__UpperCAmelCase ) )
__snake_case : Optional[int] = short_path.replace(os.path.sep , '.' )
submodules.append(__UpperCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
__snake_case : Dict = str((Path(__UpperCAmelCase ) / fname).relative_to(__UpperCAmelCase ) )
__snake_case : Optional[int] = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(__UpperCAmelCase )
return submodules
__magic_name__ = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
]
def UpperCAmelCase__( ):
# This is to make sure the transformers module imported is the one in the repo.
__snake_case : int = importlib.util.spec_from_file_location(
'transformers' , os.path.join(__UpperCAmelCase , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__snake_case : Any = spec.loader.load_module()
__snake_case : Optional[int] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(__UpperCAmelCase ) > 0:
__snake_case : List[str] = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 679 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (DDIMParallelScheduler,)
__UpperCAmelCase = (("eta", 0.0), ("num_inference_steps", 5_0))
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : Union[str, Any] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**_UpperCAmelCase )
return config
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : int = self.scheduler_classes[0]
__snake_case : Optional[Any] = self.get_scheduler_config(**_UpperCAmelCase )
__snake_case : List[Any] = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[str] = 10, 0.0
__snake_case : Optional[int] = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for t in scheduler.timesteps:
__snake_case : Optional[Any] = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowercase_ ( self ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
__snake_case : List[str] = self.scheduler_classes[0]
__snake_case : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
__snake_case : int = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase_ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowercase_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowercase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowercase_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def lowercase_ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_UpperCAmelCase )
def lowercase_ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_UpperCAmelCase )
def lowercase_ ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def lowercase_ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_UpperCAmelCase )
def lowercase_ ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase )
def lowercase_ ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_UpperCAmelCase , eta=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.scheduler_classes[0]
__snake_case : List[Any] = self.get_scheduler_config()
__snake_case : Tuple = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def lowercase_ ( self ):
__snake_case : Tuple = self.scheduler_classes[0]
__snake_case : List[Any] = self.get_scheduler_config()
__snake_case : str = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[Any] = 10, 0.0
scheduler.set_timesteps(_UpperCAmelCase )
__snake_case : Optional[Any] = self.dummy_model()
__snake_case : Any = self.dummy_sample_deter
__snake_case : Optional[int] = self.dummy_sample_deter + 0.1
__snake_case : int = self.dummy_sample_deter - 0.1
__snake_case : List[Any] = samplea.shape[0]
__snake_case : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
__snake_case : Union[str, Any] = torch.arange(_UpperCAmelCase )[0:3, None].repeat(1 , _UpperCAmelCase )
__snake_case : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__snake_case : int = scheduler.batch_step_no_noise(_UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCAmelCase )
__snake_case : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Dict = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.full_loop()
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Any = self.full_loop(prediction_type='v_prediction' )
__snake_case : str = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : Union[str, Any] = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Optional[Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : Optional[Any] = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 679 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 1 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = logging.get_logger()
# the current default level is logging.WARNING
__snake_case : Any = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Any = logging.get_verbosity()
__snake_case : Dict = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : List[str] = 'Testing 1, 2, 3'
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , '' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
# restore to the original level
logging.set_verbosity(_UpperCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='error' )
def lowercase_ ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case : Optional[Any] = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : Optional[Any] = os.getenv('TRANSFORMERS_VERBOSITY' , _UpperCAmelCase )
__snake_case : List[Any] = logging.log_levels[env_level_str]
__snake_case : Optional[int] = logging.get_verbosity()
self.assertEqual(
_UpperCAmelCase , _UpperCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
__snake_case : Union[str, Any] = ''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='super-error' )
def lowercase_ ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
__snake_case : Optional[int] = logging.logging.getLogger()
with CaptureLogger(_UpperCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('transformers.models.bart.tokenization_bart' )
self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out )
# no need to restore as nothing was changed
def lowercase_ ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
__snake_case : str = logging.get_logger('transformers.models.bart.tokenization_bart' )
__snake_case : Dict = 'Testing 1, 2, 3'
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ):
# nothing should be logged as env var disables this method
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning_advice(_UpperCAmelCase )
self.assertEqual(cl.out , '' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(_UpperCAmelCase ) as cl:
logger.warning_advice(_UpperCAmelCase )
self.assertEqual(cl.out , msg + '\n' )
def UpperCAmelCase__( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 679 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__()
# make sure scheduler can always be converted to DDIM
__snake_case : List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self , _UpperCAmelCase = 1 , _UpperCAmelCase = None , _UpperCAmelCase = 0.0 , _UpperCAmelCase = 50 , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , ):
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , _UpperCAmelCase ):
__snake_case : Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__snake_case : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__snake_case : List[str] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__snake_case : Optional[int] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__snake_case : List[Any] = self.scheduler.step(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , eta=_UpperCAmelCase , use_clipped_model_output=_UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
__snake_case : Dict = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case : List[str] = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
| 679 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__magic_name__ = (3, 9, -11, 0, 7, 5, 1, -1)
__magic_name__ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = 42
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Node | None = None
for i in sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ):
__snake_case : Tuple = Node(_UpperCAmelCase , self.head )
def __iter__( self ):
__snake_case : Dict = self.head
while node:
yield node.data
__snake_case : List[Any] = node.next_node
def __len__( self ):
return sum(1 for _ in self )
def __str__( self ):
return " -> ".join([str(_UpperCAmelCase ) for node in self] )
def UpperCAmelCase__( __UpperCAmelCase : SortedLinkedList , __UpperCAmelCase : SortedLinkedList ):
return SortedLinkedList(list(__UpperCAmelCase ) + list(__UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 679 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 1 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__magic_name__ = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__magic_name__ = parser.parse_args()
__magic_name__ = '''cpu'''
__magic_name__ = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__magic_name__ = '''path-to-your-trained-model'''
__magic_name__ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__magic_name__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__magic_name__ = pipe.to(device)
# to channels last
__magic_name__ = pipe.unet.to(memory_format=torch.channels_last)
__magic_name__ = pipe.vae.to(memory_format=torch.channels_last)
__magic_name__ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__magic_name__ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__magic_name__ = torch.randn(2, 4, 64, 64)
__magic_name__ = torch.rand(1) * 999
__magic_name__ = torch.randn(2, 77, 768)
__magic_name__ = (sample, timestep, encoder_hidden_status)
try:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__magic_name__ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__magic_name__ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__magic_name__ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__magic_name__ = 666
__magic_name__ = torch.Generator(device).manual_seed(seed)
__magic_name__ = {'''generator''': generator}
if args.steps is not None:
__magic_name__ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__magic_name__ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 679 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : List[Any] = torch.load(__UpperCAmelCase , map_location='cpu' )
if "model" in sd.keys():
__snake_case : List[Any] = torch.load(__UpperCAmelCase , map_location='cpu' )['model']
# pop unnecessary weights
__snake_case : List[Any] = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__UpperCAmelCase )
__snake_case : List[str] = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__snake_case : List[str] = sd.pop(__UpperCAmelCase )
__snake_case : Optional[int] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__snake_case : Dict = sd[key]
# We split QKV in separate Q,K,V
__snake_case : Dict = key.replace('.qkv_proj.' , '.q_proj.' )
__snake_case : Optional[Any] = key.replace('.qkv_proj.' , '.k_proj.' )
__snake_case : Optional[int] = key.replace('.qkv_proj.' , '.v_proj.' )
__snake_case : Optional[int] = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__snake_case , __snake_case , __snake_case : Any = torch.split(__UpperCAmelCase , depth // 3 , dim=0 )
__snake_case : Tuple = q
__snake_case : Optional[Any] = k
__snake_case : Optional[Any] = v
del sd[key]
return sd
@torch.no_grad()
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=None ):
__snake_case : Optional[Any] = load_checkpoint(__UpperCAmelCase )
if config is not None:
__snake_case : str = OPTConfig.from_pretrained(__UpperCAmelCase )
else:
__snake_case : str = OPTConfig()
__snake_case : List[Any] = OPTModel(__UpperCAmelCase ).half().eval()
model.load_state_dict(__UpperCAmelCase )
# Check results
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__magic_name__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 679 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 1 |
from __future__ import annotations
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = order
# a_{0} ... a_{k}
__snake_case : Optional[Any] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__snake_case : Dict = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__snake_case : List[Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
__snake_case : Any = [0.0] * self.order
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) < self.order:
__snake_case : Union[str, Any] = [1.0, *a_coeffs]
if len(_UpperCAmelCase ) != self.order + 1:
__snake_case : List[str] = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(_UpperCAmelCase )}"""
)
raise ValueError(_UpperCAmelCase )
if len(_UpperCAmelCase ) != self.order + 1:
__snake_case : str = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(_UpperCAmelCase )}"""
)
raise ValueError(_UpperCAmelCase )
__snake_case : List[str] = a_coeffs
__snake_case : Optional[Any] = b_coeffs
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : int = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__snake_case : Optional[Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__snake_case : Dict = self.input_history[:-1]
__snake_case : List[Any] = self.output_history[:-1]
__snake_case : List[str] = sample
__snake_case : Dict = result
return result
| 679 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 1 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Optional[int] = eval_examples
__snake_case : int = post_process_function
def lowercase_ ( self , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = "eval" , **_UpperCAmelCase , ):
__snake_case : Any = gen_kwargs.copy()
__snake_case : Optional[int] = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
__snake_case : Dict = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
__snake_case : str = gen_kwargs
__snake_case : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case : Optional[Any] = self.get_eval_dataloader(_UpperCAmelCase )
__snake_case : List[str] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case : int = self.compute_metrics
__snake_case : Any = None
__snake_case : int = time.time()
__snake_case : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case : Union[str, Any] = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
__snake_case : Optional[int] = compute_metrics
__snake_case : Any = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case : Optional[Any] = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Any = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__snake_case : Dict = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
__snake_case : List[str] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase = "test" , **_UpperCAmelCase ):
__snake_case : List[Any] = gen_kwargs.copy()
__snake_case : Optional[int] = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case : List[Any] = self.compute_metrics
__snake_case : str = None
__snake_case : Any = time.time()
__snake_case : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__snake_case : Dict = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
__snake_case : Dict = compute_metrics
__snake_case : str = self.args.eval_batch_size * self.args.world_size
if F"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[F"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case : int = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 'predict' )
__snake_case : List[Any] = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"""{metric_key_prefix}_""" ):
__snake_case : List[str] = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
| 679 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "beit"
def __init__( self , _UpperCAmelCase=8_192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Dict = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Optional[int] = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Optional[int] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : List[str] = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Any = patch_size
__snake_case : List[str] = num_channels
__snake_case : List[str] = use_mask_token
__snake_case : Tuple = use_absolute_position_embeddings
__snake_case : Optional[int] = use_relative_position_bias
__snake_case : Union[str, Any] = use_shared_relative_position_bias
__snake_case : str = layer_scale_init_value
__snake_case : int = drop_path_rate
__snake_case : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
__snake_case : List[str] = out_indices
__snake_case : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
__snake_case : Optional[Any] = use_auxiliary_head
__snake_case : Union[str, Any] = auxiliary_loss_weight
__snake_case : Any = auxiliary_channels
__snake_case : Union[str, Any] = auxiliary_num_convs
__snake_case : str = auxiliary_concat_input
__snake_case : List[str] = semantic_loss_ignore_index
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = version.parse("1.11")
@property
def lowercase_ ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase_ ( self ):
return 1E-4
| 679 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : int ):
if n == 1 or not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return 0
elif n == 2:
return 1
else:
__snake_case : List[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : List[Any] = 0
__snake_case : Any = 2
while digits < n:
index += 1
__snake_case : Optional[Any] = len(str(fibonacci(__UpperCAmelCase ) ) )
return index
def UpperCAmelCase__( __UpperCAmelCase : int = 10_00 ):
return fibonacci_digits_index(__UpperCAmelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 679 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "roberta"
def __init__( self , _UpperCAmelCase=50_265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : Any = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Optional[int] = hidden_act
__snake_case : str = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : str = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : int = initializer_range
__snake_case : Union[str, Any] = layer_norm_eps
__snake_case : int = position_embedding_type
__snake_case : int = use_cache
__snake_case : str = classifier_dropout
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
@property
def lowercase_ ( self ):
if self.task == "multiple-choice":
__snake_case : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__snake_case : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 679 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : float ):
return round(float(moles / volume ) * nfactor )
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float ):
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
__magic_name__ = parser.parse_args()
__magic_name__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__magic_name__ = CLIPImageProcessor()
__magic_name__ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
__magic_name__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 679 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = ["pixel_values"]
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : str = size if size is not None else {'shortest_edge': 224}
__snake_case : List[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__snake_case : List[str] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
__snake_case : str = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='crop_size' )
__snake_case : Optional[Any] = do_resize
__snake_case : Dict = size
__snake_case : Optional[int] = resample
__snake_case : Union[str, Any] = do_center_crop
__snake_case : Optional[Any] = crop_size
__snake_case : List[str] = do_rescale
__snake_case : Optional[Any] = rescale_factor
__snake_case : Union[str, Any] = do_normalize
__snake_case : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case : Optional[int] = do_convert_rgb
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[str] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
__snake_case : Tuple = get_resize_output_image_size(_UpperCAmelCase , size=size['shortest_edge'] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : str = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['height'], size['width']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
__snake_case : str = size if size is not None else self.size
__snake_case : Optional[int] = get_size_dict(_UpperCAmelCase , param_name='size' , default_to_square=_UpperCAmelCase )
__snake_case : Tuple = resample if resample is not None else self.resample
__snake_case : str = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
__snake_case : int = get_size_dict(_UpperCAmelCase , param_name='crop_size' , default_to_square=_UpperCAmelCase )
__snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
__snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__snake_case : List[Any] = image_mean if image_mean is not None else self.image_mean
__snake_case : Any = image_std if image_std is not None else self.image_std
__snake_case : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case : Union[str, Any] = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case : Optional[int] = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__snake_case : Dict = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__snake_case : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
__snake_case : Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
__snake_case : Optional[int] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__snake_case : Any = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__snake_case : List[str] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__snake_case : Dict = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 679 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : int = data
__snake_case : Tuple = self
__snake_case : Tuple = 0
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
# map from node name to the node object
__snake_case : dict[T, DisjointSetTreeNode[T]] = {}
def lowercase_ ( self , _UpperCAmelCase ):
# create a new set with x as its member
__snake_case : Optional[Any] = DisjointSetTreeNode(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
# find the set x belongs to (with path-compression)
__snake_case : Tuple = self.map[data]
if elem_ref != elem_ref.parent:
__snake_case : Dict = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# helper function for union operation
if nodea.rank > nodea.rank:
__snake_case : List[str] = nodea
else:
__snake_case : Optional[Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
# merge 2 disjoint sets
self.link(self.find_set(_UpperCAmelCase ) , self.find_set(_UpperCAmelCase ) )
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
# connections: map from the node to the neighbouring nodes (with weights)
__snake_case : dict[T, dict[T, int]] = {}
def lowercase_ ( self , _UpperCAmelCase ):
# add a node ONLY if its not present in the graph
if node not in self.connections:
__snake_case : Any = {}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# add an edge with the given weight
self.add_node(_UpperCAmelCase )
self.add_node(_UpperCAmelCase )
__snake_case : str = weight
__snake_case : str = weight
def lowercase_ ( self ):
__snake_case : List[str] = []
__snake_case : List[str] = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda _UpperCAmelCase : x[2] )
# creating the disjoint set
__snake_case : Union[str, Any] = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(_UpperCAmelCase )
# MST generation
__snake_case : str = 0
__snake_case : Union[str, Any] = 0
__snake_case : str = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
__snake_case , __snake_case , __snake_case : Optional[int] = edges[index]
index += 1
__snake_case : Tuple = disjoint_set.find_set(_UpperCAmelCase )
__snake_case : Any = disjoint_set.find_set(_UpperCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
disjoint_set.union(_UpperCAmelCase , _UpperCAmelCase )
return graph
| 679 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 1 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase="None" , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__snake_case : Any = parent
__snake_case : int = batch_size
__snake_case : str = seq_length
__snake_case : Tuple = is_training
__snake_case : Optional[int] = use_input_mask
__snake_case : Union[str, Any] = use_token_type_ids
__snake_case : Any = use_labels
__snake_case : Optional[Any] = vocab_size
__snake_case : Dict = hidden_size
__snake_case : Optional[Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : int = intermediate_size
__snake_case : List[Any] = hidden_act
__snake_case : int = hidden_dropout_prob
__snake_case : int = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Any = type_vocab_size
__snake_case : List[Any] = type_sequence_label_size
__snake_case : Dict = initializer_range
__snake_case : str = num_labels
__snake_case : Any = num_choices
__snake_case : Any = relative_attention
__snake_case : List[Any] = position_biased_input
__snake_case : Tuple = pos_att_type
__snake_case : List[Any] = scope
def lowercase_ ( self ):
__snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case : List[Any] = None
if self.use_token_type_ids:
__snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case : Dict = None
__snake_case : Tuple = None
__snake_case : str = None
if self.use_labels:
__snake_case : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : int = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase_ ( self , _UpperCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = DebertaVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__snake_case : List[str] = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )[0]
__snake_case : str = model(_UpperCAmelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = DebertaVaForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : Any = DebertaVaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Union[str, Any] = self.num_labels
__snake_case : Optional[int] = DebertaVaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = DebertaVaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = DebertaVaForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Union[str, Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Optional[int] = config_and_inputs
__snake_case : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = DebertaVaModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
def lowercase_ ( self ):
__snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Optional[Any] = DebertaVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def lowercase_ ( self ):
pass
@slow
def lowercase_ ( self ):
__snake_case : Optional[Any] = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
__snake_case : Optional[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
__snake_case : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__snake_case : List[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
# compare the actual values for a slice.
__snake_case : Dict = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 679 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 1 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__UpperCAmelCase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def UpperCAmelCase__( __UpperCAmelCase : Any ):
__snake_case : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__snake_case : str = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format
__snake_case : Union[str, Any] = PipelineDataFormat.from_str(
format=__UpperCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__UpperCAmelCase , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = nlp
__snake_case : Optional[Any] = reader
@staticmethod
def lowercase_ ( _UpperCAmelCase ):
__snake_case : Optional[Any] = parser.add_parser('run' , help='Run a pipeline through the CLI' )
run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' )
run_parser.add_argument('--input' , type=_UpperCAmelCase , help='Path to the file to use for inference' )
run_parser.add_argument('--output' , type=_UpperCAmelCase , help='Path to the file that will be used post to write results.' )
run_parser.add_argument('--model' , type=_UpperCAmelCase , help='Name or path to the model to instantiate.' )
run_parser.add_argument('--config' , type=_UpperCAmelCase , help='Name or path to the model\'s config to instantiate.' )
run_parser.add_argument(
'--tokenizer' , type=_UpperCAmelCase , help='Name of the tokenizer to use. (default: same as the model name)' )
run_parser.add_argument(
'--column' , type=_UpperCAmelCase , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
run_parser.add_argument(
'--format' , type=_UpperCAmelCase , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
run_parser.add_argument(
'--device' , type=_UpperCAmelCase , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' )
run_parser.set_defaults(func=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : int = self._nlp, []
for entry in self._reader:
__snake_case : Union[str, Any] = nlp(**_UpperCAmelCase ) if self._reader.is_multi_columns else nlp(_UpperCAmelCase )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
outputs.append(_UpperCAmelCase )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__snake_case : Tuple = self._reader.save_binary(_UpperCAmelCase )
logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(_UpperCAmelCase )
| 679 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__magic_name__ = get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple=0 ):
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case : Optional[Any] = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case : Dict = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__snake_case : List[Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case : Union[str, Any] = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__snake_case : Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case : Union[str, Any] = os.path.join(__UpperCAmelCase , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
logger.info(F"""Saving model to {ckpt_dir}""" )
__snake_case : Optional[Any] = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
__snake_case : str = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
__snake_case : Any = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading model from {input_model_file}""" )
__snake_case : List[Any] = torch.load(__UpperCAmelCase )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case : Tuple = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
__snake_case : Union[str, Any] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading model from {input_model_file}""" )
__snake_case : Tuple = torch.load(__UpperCAmelCase )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case : Tuple = (
os.path.join(__UpperCAmelCase , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
__snake_case : Union[str, Any] = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case : Union[str, Any] = state_dict['model']
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=0 ):
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case : Tuple = FSDP.optim_state_dict(__UpperCAmelCase , __UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case : Optional[int] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__snake_case : Optional[int] = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
__snake_case : str = os.path.join(__UpperCAmelCase , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case : Any = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case : Tuple = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
__snake_case : Dict = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
__snake_case : str = torch.load(__UpperCAmelCase )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
__snake_case : Optional[Any] = (
os.path.join(__UpperCAmelCase , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
__snake_case : str = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__UpperCAmelCase ) , )
__snake_case : Union[str, Any] = optim_state['optimizer']
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
__snake_case : Optional[Any] = FSDP.optim_state_dict_to_load(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
optimizer.load_state_dict(__UpperCAmelCase )
| 679 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
__snake_case : str = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
__snake_case : Dict = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
__snake_case : Tuple = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
__snake_case : Dict = key[key.find('patch_embed' ) + len('patch_embed' )]
__snake_case : List[Any] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(__UpperCAmelCase )-1}""" )
if "norm" in key:
__snake_case : Tuple = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
__snake_case : Any = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
__snake_case : List[Any] = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(__UpperCAmelCase )-1}""" )
if "layer_norm1" in key:
__snake_case : Union[str, Any] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
__snake_case : int = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
__snake_case : Optional[Any] = key[key.find('block' ) + len('block' )]
__snake_case : str = key.replace(F"""block{idx}""" , F"""block.{int(__UpperCAmelCase )-1}""" )
if "attn.q" in key:
__snake_case : List[str] = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
__snake_case : str = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
__snake_case : Any = key.replace('attn' , 'attention.self' )
if "fc1" in key:
__snake_case : List[Any] = key.replace('fc1' , 'dense1' )
if "fc2" in key:
__snake_case : Tuple = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
__snake_case : str = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
__snake_case : str = key.replace('linear_fuse.conv' , 'linear_fuse' )
__snake_case : int = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
__snake_case : Union[str, Any] = key[key.find('linear_c' ) + len('linear_c' )]
__snake_case : str = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(__UpperCAmelCase )-1}""" )
if "bot_conv" in key:
__snake_case : int = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
__snake_case : List[Any] = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
__snake_case : Dict = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
__snake_case : Optional[Any] = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
__snake_case : Any = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
__snake_case : int = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
__snake_case : Any = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
__snake_case : Dict = key.replace('module.last_layer_depth' , 'head.head' )
__snake_case : Optional[int] = value
return new_state_dict
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
__snake_case : str = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
__snake_case : str = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
__snake_case : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
__snake_case : str = kv_bias[: config.hidden_sizes[i]]
__snake_case : str = kv_weight[
config.hidden_sizes[i] :, :
]
__snake_case : int = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase__( ):
__snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : Tuple = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Any=None ):
__snake_case : Union[str, Any] = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
__snake_case : List[str] = GLPNImageProcessor()
# prepare image
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
__snake_case : str = torch.load(__UpperCAmelCase , map_location=torch.device('cpu' ) )
# rename keys
__snake_case : Optional[int] = rename_keys(__UpperCAmelCase )
# key and value matrices need special treatment
read_in_k_v(__UpperCAmelCase , __UpperCAmelCase )
# create HuggingFace model and load state dict
__snake_case : int = GLPNForDepthEstimation(__UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
# forward pass
__snake_case : Optional[Any] = model(__UpperCAmelCase )
__snake_case : List[str] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
__snake_case : Optional[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
__snake_case : Optional[Any] = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
__snake_case : List[str] = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCAmelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(__UpperCAmelCase , __UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCAmelCase , )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
__magic_name__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 679 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = DebertaVaTokenizer
__UpperCAmelCase = DebertaVaTokenizerFast
__UpperCAmelCase = True
__UpperCAmelCase = True
def lowercase_ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case : Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = 'this is a test'
__snake_case : List[str] = 'this is a test'
return input_text, output_text
def lowercase_ ( self ):
__snake_case : str = '<pad>'
__snake_case : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(_UpperCAmelCase ) , 30_001 )
def lowercase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def lowercase_ ( self ):
# fmt: off
__snake_case : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
__snake_case : List[Any] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
__snake_case : List[Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
__snake_case : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : str = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
__snake_case : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def lowercase_ ( self ):
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# fmt: off
__snake_case : Dict = 'I was born in 92000, and this is falsé.'
__snake_case : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__snake_case : Any = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# fmt: off
__snake_case : Optional[int] = 'I was born in 92000, and this is falsé.'
__snake_case : str = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__snake_case : int = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# fmt: off
__snake_case : Union[str, Any] = 'I was born in 92000, and this is falsé.'
__snake_case : Optional[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__snake_case : Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# fmt: off
__snake_case : Tuple = 'I was born in 92000, and this is falsé.'
__snake_case : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__snake_case : List[str] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
# fmt: off
__snake_case : List[Any] = ' \tHeLLo!how \n Are yoU? '
__snake_case : List[str] = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
__snake_case : Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
__snake_case : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Tuple = self.get_rust_tokenizer()
__snake_case : Dict = 'I was born in 92000, and this is falsé.'
__snake_case : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
__snake_case : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__snake_case : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Any = self.get_rust_tokenizer()
__snake_case : int = tokenizer.encode(_UpperCAmelCase )
__snake_case : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = 'This is a test'
__snake_case : Optional[Any] = [13, 1, 4_398, 25, 21, 1_289]
__snake_case : Optional[int] = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
__snake_case : Optional[int] = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
__snake_case : Any = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__snake_case : str = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
__snake_case : Optional[Any] = 'I was born in 92000, and this is falsé.'
__snake_case : Union[str, Any] = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
__snake_case : Tuple = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
__snake_case : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__snake_case : Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : int = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = DebertaVaTokenizer(_UpperCAmelCase )
__snake_case : List[str] = tokenizer.encode('sequence builders' )
__snake_case : Dict = tokenizer.encode('multi-sequence build' )
__snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
__snake_case : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def lowercase_ ( self ):
# fmt: off
__snake_case : Dict = {'input_ids': [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 679 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."})
__UpperCAmelCase = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for training."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size for evaluation."})
__UpperCAmelCase = field(default=0.1 , metadata={"help": "Value of weight decay."})
__UpperCAmelCase = field(
default=1_0_0_0_0 , metadata={"help": "Size of buffer used to shuffle streaming dataset."})
__UpperCAmelCase = field(default=2E-4 , metadata={"help": "Learning rate fo training."})
__UpperCAmelCase = field(default="cosine" , metadata={"help": "Learning rate."})
__UpperCAmelCase = field(
default=7_5_0 , metadata={"help": "Number of warmup steps in the learning rate schedule."})
__UpperCAmelCase = field(
default=1_6 , metadata={"help": "Number of gradient accumulation steps."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Use gradient checkpointing to reduce memory footprint."})
__UpperCAmelCase = field(default=5_0_0_0_0 , metadata={"help": "Maximum number of training steps."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Sequence lengths used for training."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Training seed."})
__UpperCAmelCase = field(
default=1_0_2_4 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "States path if the training should continue from a checkpoint folder."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "If True the data is pretokenized."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."})
__UpperCAmelCase = field(default=2 , metadata={"help": "Batch size used for evaluation."})
__UpperCAmelCase = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."})
__UpperCAmelCase = field(default=1_0_2_4 , metadata={"help": "Length of sequences to be evaluated."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Sample from the language model's output distribution."})
__UpperCAmelCase = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."})
__UpperCAmelCase = field(default=2_5_6 , metadata={"help": "Maximum number of newly generated tokens."})
__UpperCAmelCase = field(default=0 , metadata={"help": "Top-k parameter used for generation."})
__UpperCAmelCase = field(default=0.95 , metadata={"help": "Top-p parameter used for nucleus sampling."})
__UpperCAmelCase = field(default=1_0 , metadata={"help": "Number of generations to run in parallel."})
__UpperCAmelCase = field(
default=2_0_0 , metadata={"help": "Number of completions to generate for each sample."})
__UpperCAmelCase = field(default=1 , metadata={"help": "Random seed used for evaluation."})
__UpperCAmelCase = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."})
__UpperCAmelCase = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"})
__UpperCAmelCase = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
__UpperCAmelCase = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."})
__UpperCAmelCase = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."})
__UpperCAmelCase = field(
default=1_0_0_0_0_0 , metadata={"help": "Number of files to save per JSON output file."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(
default=1_0_0_0 , metadata={"help": "Maximum line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1_0_0 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.25 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."})
__UpperCAmelCase = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."})
__UpperCAmelCase = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "If True, near-duplicate samples are removed."})
__UpperCAmelCase = field(
default=0.85 , metadata={"help": "Jaccard threshold for near-duplicate samples."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."})
__UpperCAmelCase = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."})
__UpperCAmelCase = field(default="content" , metadata={"help": "Column containing text data to process."})
__UpperCAmelCase = field(default=2_0_0_0_0_0 , metadata={"help": "Number of examples to train tokenizer on."})
__UpperCAmelCase = field(
default=3_2_7_6_8 , metadata={"help": "Number of examples to train the tokenizer on."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."})
__UpperCAmelCase = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."})
__UpperCAmelCase = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."})
__UpperCAmelCase = field(default="codeparrot" , metadata={"help": "Name of the created model."})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Push saved tokenizer to the hub."})
| 679 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Any = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__snake_case : List[str] = True if 'large' in model_name or 'huge' in model_name else False
__snake_case : Optional[Any] = True if 'large' in model_name or 'huge' in model_name else False
__snake_case : Any = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__snake_case : Any = [3, 3, 3, 3]
__snake_case : str = [5, 5, 5, 5]
elif "fl4" in model_name:
__snake_case : Optional[int] = [4, 4, 4, 4]
__snake_case : int = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__snake_case : Optional[int] = [3, 3, 3, 3]
if "lrf" in model_name:
__snake_case : Optional[int] = [3, 3, 3, 3]
else:
__snake_case : int = [2, 2, 2, 2]
if "tiny" in model_name:
__snake_case : List[Any] = 96
elif "small" in model_name:
__snake_case : int = 96
elif "base" in model_name:
__snake_case : Optional[Any] = 1_28
elif "large" in model_name:
__snake_case : Union[str, Any] = 1_92
elif "xlarge" in model_name:
__snake_case : int = 2_56
elif "huge" in model_name:
__snake_case : Any = 3_52
# set label information
__snake_case : Optional[Any] = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__snake_case : Union[str, Any] = 'imagenet-22k-id2label.json'
else:
__snake_case : List[str] = 'imagenet-1k-id2label.json'
__snake_case : Any = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
__snake_case : Any = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__snake_case : List[str] = {v: k for k, v in idalabel.items()}
__snake_case : List[str] = FocalNetConfig(
embed_dim=__UpperCAmelCase , depths=__UpperCAmelCase , focal_levels=__UpperCAmelCase , focal_windows=__UpperCAmelCase , use_conv_embed=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase , use_post_layernorm=__UpperCAmelCase , use_layerscale=__UpperCAmelCase , )
return config
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if "patch_embed.proj" in name:
__snake_case : List[str] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__snake_case : Dict = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__snake_case : Union[str, Any] = 'encoder.' + name
if "encoder.layers" in name:
__snake_case : Optional[Any] = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__snake_case : Any = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__snake_case : Any = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__snake_case : Optional[int] = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__snake_case : Union[str, Any] = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__snake_case : Any = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__snake_case : Any = 'layernorm.weight'
if name == "norm.bias":
__snake_case : List[Any] = 'layernorm.bias'
if "head" in name:
__snake_case : Dict = name.replace('head' , 'classifier' )
else:
__snake_case : Optional[int] = 'focalnet.' + name
return name
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Dict=False ):
# fmt: off
__snake_case : Any = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__snake_case : str = model_name_to_url[model_name]
print('Checkpoint URL: ' , __UpperCAmelCase )
__snake_case : int = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__snake_case : Optional[Any] = state_dict.pop(__UpperCAmelCase )
__snake_case : Any = val
__snake_case : Any = get_focalnet_config(__UpperCAmelCase )
__snake_case : List[str] = FocalNetForImageClassification(__UpperCAmelCase )
model.eval()
# load state dict
model.load_state_dict(__UpperCAmelCase )
# verify conversion
__snake_case : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : int = BitImageProcessor(
do_resize=__UpperCAmelCase , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCAmelCase , crop_size=2_24 , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , )
__snake_case : Any = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
__snake_case : Optional[int] = processor(images=__UpperCAmelCase , return_tensors='pt' )
__snake_case : Union[str, Any] = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case : Tuple = image_transforms(__UpperCAmelCase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , __UpperCAmelCase , atol=1E-4 )
__snake_case : Optional[Any] = model(**__UpperCAmelCase )
__snake_case : List[Any] = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__snake_case : Tuple = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
__snake_case : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
__snake_case : List[Any] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
__snake_case : List[str] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
__snake_case : Union[str, Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
__snake_case : Tuple = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
__magic_name__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 679 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "retribert"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : List[Any] = num_attention_heads
__snake_case : Any = hidden_act
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : Union[str, Any] = initializer_range
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : int = share_encoders
__snake_case : Optional[Any] = projection_dim
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | 1 |
from __future__ import annotations
from typing import Any
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Union[str, Any] = num_of_nodes
__snake_case : list[list[int]] = []
__snake_case : dict[int, int] = {}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase_ ( self , _UpperCAmelCase ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase_ ( self , _UpperCAmelCase ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__snake_case : List[str] = self.find_component(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if component_size[u_node] <= component_size[v_node]:
__snake_case : Any = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_UpperCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
__snake_case : Optional[int] = self.find_component(_UpperCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = []
__snake_case : Tuple = 0
__snake_case : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__snake_case : Optional[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__snake_case , __snake_case , __snake_case : Optional[int] = edge
__snake_case : Any = self.m_component[u]
__snake_case : Dict = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__snake_case : Union[str, Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case , __snake_case , __snake_case : List[str] = edge
__snake_case : str = self.m_component[u]
__snake_case : List[str] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
__snake_case : int = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCAmelCase__( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ):
__snake_case : List[str] = parent
__snake_case : Tuple = batch_size
__snake_case : str = image_size
__snake_case : Union[str, Any] = patch_size
__snake_case : Optional[int] = num_channels
__snake_case : List[str] = last_hidden_size
__snake_case : Optional[Any] = num_attention_heads
__snake_case : Dict = hidden_act
__snake_case : List[Any] = conv_kernel_size
__snake_case : int = output_stride
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Any = classifier_dropout_prob
__snake_case : str = use_labels
__snake_case : Optional[Any] = is_training
__snake_case : Dict = num_labels
__snake_case : str = initializer_range
__snake_case : Union[str, Any] = scope
def lowercase_ ( self ):
__snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
__snake_case : Dict = None
if self.use_labels:
__snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase_ ( self ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Tuple = self.num_labels
__snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : Optional[Any] = self.num_labels
__snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
__snake_case : Tuple = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
__snake_case : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def lowercase_ ( self ):
__snake_case : Dict = MobileViTModelTester(self )
__snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def lowercase_ ( self ):
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Tuple = model_class(_UpperCAmelCase )
__snake_case : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : List[str] = [*signature.parameters.keys()]
__snake_case : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase_ ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : str = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
__snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case : Optional[Any] = outputs.hidden_states
__snake_case : str = 5
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case : Optional[Any] = 2
for i in range(len(_UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
@slow
def lowercase_ ( self ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ):
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def lowercase_ ( self ):
__snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : str = prepare_img()
__snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Tuple = model(**_UpperCAmelCase )
# verify the logits
__snake_case : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
__snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Optional[int] = prepare_img()
__snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : int = model(**_UpperCAmelCase )
__snake_case : int = outputs.logits
# verify the logits
__snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCAmelCase )
__snake_case : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=_UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
def lowercase_ ( self ):
__snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : str = model.to(_UpperCAmelCase )
__snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
__snake_case : Any = prepare_img()
__snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
__snake_case : Optional[Any] = model(**_UpperCAmelCase )
__snake_case : str = outputs.logits.detach().cpu()
__snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
__snake_case : List[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
__snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
__snake_case : List[str] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
| 679 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__magic_name__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Optional[Any]=None , ):
if attention_mask is None:
__snake_case : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__snake_case : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__snake_case : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.02 , ):
__snake_case : Dict = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : Optional[int] = use_labels
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : Any = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Dict = max_position_embeddings
__snake_case : Any = eos_token_id
__snake_case : Union[str, Any] = pad_token_id
__snake_case : int = bos_token_id
__snake_case : Union[str, Any] = initializer_range
def lowercase_ ( self ):
__snake_case : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__snake_case : Optional[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__snake_case : List[Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
__snake_case : Optional[Any] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def lowercase_ ( self ):
__snake_case , __snake_case : List[Any] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = 20
__snake_case : Tuple = model_class_name(_UpperCAmelCase )
__snake_case : Dict = model.encode(inputs_dict['input_ids'] )
__snake_case , __snake_case : Optional[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__snake_case : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__snake_case : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : Tuple = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__snake_case : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Dict = model.decode(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[str] = 20
__snake_case : List[str] = model_class_name(_UpperCAmelCase )
__snake_case : str = model.encode(inputs_dict['input_ids'] )
__snake_case , __snake_case : Any = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__snake_case : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__snake_case : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
__snake_case : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__snake_case : str = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__snake_case : Dict = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
__snake_case : Dict = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
__snake_case : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = 9_9
def lowercase_ ( self ):
__snake_case : int = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
__snake_case : List[str] = input_ids.shape[0]
__snake_case : List[str] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase_ ( self ):
__snake_case , __snake_case , __snake_case : int = self._get_config_and_data()
__snake_case : Tuple = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
__snake_case : Union[str, Any] = lm_model(input_ids=_UpperCAmelCase )
__snake_case : Optional[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Dict = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
__snake_case : List[Any] = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
__snake_case : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
__snake_case : int = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
__snake_case : List[str] = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__snake_case : str = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
__snake_case : Any = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
__snake_case : Any = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
__snake_case : List[Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase , UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = True
__UpperCAmelCase = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCAmelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def lowercase_ ( self ):
__snake_case : List[str] = FlaxBlenderbotModelTester(self )
def lowercase_ ( self ):
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : Any = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
__snake_case : Any = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__snake_case : Optional[int] = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase_ ( self ):
__snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case : List[Any] = model_class(_UpperCAmelCase )
__snake_case : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__snake_case : int = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('JIT Enabled' ):
__snake_case : int = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__snake_case : Union[str, Any] = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ):
for model_class_name in self.all_model_classes:
__snake_case : Any = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__snake_case : List[str] = np.ones((1, 1) ) * model.config.eos_token_id
__snake_case : Optional[int] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def lowercase_ ( self ):
__snake_case : Union[str, Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
__snake_case : Union[str, Any] = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
__snake_case : List[str] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCAmelCase )
__snake_case : List[str] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
__snake_case : str = ['Sam']
__snake_case : int = tokenizer(_UpperCAmelCase , return_tensors='jax' )
__snake_case : Tuple = model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : List[str] = 'Sam is a great name. It means "sun" in Gaelic.'
__snake_case : Optional[int] = tokenizer.batch_decode(_UpperCAmelCase , **_UpperCAmelCase )
assert generated_txt[0].strip() == tgt_text
| 679 | def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
try:
__snake_case : int = float(__UpperCAmelCase )
except ValueError:
raise ValueError('Please enter a valid number' )
__snake_case : Any = decimal - int(__UpperCAmelCase )
if fractional_part == 0:
return int(__UpperCAmelCase ), 1
else:
__snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
__snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
__snake_case : List[Any] = 10**number_of_frac_digits
__snake_case , __snake_case : List[Any] = denominator, numerator
while True:
__snake_case : Any = dividend % divisor
if remainder == 0:
break
__snake_case , __snake_case : Optional[int] = divisor, remainder
__snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
return int(__UpperCAmelCase ), int(__UpperCAmelCase )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction("67") = }''')
print(F'''{decimal_to_fraction("45.0") = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction("6.25") = }''')
print(F'''{decimal_to_fraction("78td") = }''')
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''CLIPFeatureExtractor''']
__magic_name__ = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowercase_ ( self ):
if self.train_file is not None:
__snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__snake_case : List[str] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = True
__UpperCAmelCase = None
__UpperCAmelCase = None
def __call__( self , _UpperCAmelCase ):
__snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
__snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
__snake_case : List[Any] = len(_UpperCAmelCase )
__snake_case : Union[str, Any] = len(features[0]['input_ids'] )
__snake_case : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
]
__snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
__snake_case : Optional[Any] = self.tokenizer.pad(
_UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
# Un-flatten
__snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
__snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
return batch
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__snake_case : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__snake_case : Optional[int] = {}
if data_args.train_file is not None:
__snake_case : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
__snake_case : int = data_args.validation_file
__snake_case : int = data_args.train_file.split('.' )[-1]
__snake_case : Tuple = load_dataset(
__UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__snake_case : Optional[int] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : List[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : str = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__snake_case : str = [F"""ending{i}""" for i in range(4 )]
__snake_case : Optional[Any] = 'sent1'
__snake_case : Tuple = 'sent2'
if data_args.max_seq_length is None:
__snake_case : List[Any] = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
__snake_case : List[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__UpperCAmelCase : Tuple ):
__snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
__snake_case : Union[str, Any] = examples[question_header_name]
__snake_case : Optional[int] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
]
# Flatten out
__snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
__snake_case : int = list(chain(*__UpperCAmelCase ) )
# Tokenize
__snake_case : Tuple = tokenizer(
__UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
__snake_case : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
__snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
__snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
__snake_case : int = train_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
__snake_case : Optional[Any] = raw_datasets['validation']
if data_args.max_eval_samples is not None:
__snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
__snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
__snake_case : List[Any] = eval_dataset.map(
__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__snake_case : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__UpperCAmelCase : int ):
__snake_case , __snake_case : Union[str, Any] = eval_predictions
__snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__snake_case : List[str] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
__snake_case : Dict = None
if training_args.resume_from_checkpoint is not None:
__snake_case : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case : List[str] = last_checkpoint
__snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
__snake_case : List[Any] = train_result.metrics
__snake_case : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('train' , __UpperCAmelCase )
trainer.save_metrics('train' , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : Dict = trainer.evaluate()
__snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics('eval' , __UpperCAmelCase )
trainer.save_metrics('eval' , __UpperCAmelCase )
__snake_case : List[Any] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 679 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "openai/whisper-base"
__UpperCAmelCase = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCAmelCase = "transcriber"
__UpperCAmelCase = WhisperProcessor
__UpperCAmelCase = WhisperForConditionalGeneration
__UpperCAmelCase = ["audio"]
__UpperCAmelCase = ["text"]
def lowercase_ ( self , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , return_tensors='pt' ).input_features
def lowercase_ ( self , _UpperCAmelCase ):
return self.model.generate(inputs=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0]
| 679 | import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''▁'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
| 679 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
__snake_case : Optional[Any] = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
'do_convert_rgb': True,
}
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self , **_UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , **_UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , **_UpperCAmelCase ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self ):
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self ):
__snake_case : Dict = self.get_tokenizer()
__snake_case : int = self.get_rust_tokenizer()
__snake_case : int = self.get_image_processor()
__snake_case : Union[str, Any] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__snake_case : Dict = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCAmelCase )
__snake_case : List[str] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__snake_case : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer , _UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case : str = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
__snake_case : Dict = self.get_image_processor(do_normalize=_UpperCAmelCase )
__snake_case : str = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=_UpperCAmelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : str = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : int = self.prepare_image_inputs()
__snake_case : Optional[int] = image_processor(_UpperCAmelCase , return_tensors='np' )
__snake_case : List[str] = processor(images=_UpperCAmelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase_ ( self ):
__snake_case : List[Any] = self.get_image_processor()
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Dict = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
__snake_case : Tuple = processor(text=_UpperCAmelCase )
__snake_case : List[str] = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self ):
__snake_case : Dict = self.get_image_processor()
__snake_case : int = self.get_tokenizer()
__snake_case : Dict = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : Optional[int] = 'Alexandra,T-shirt的价格是15便士。'
__snake_case : Any = self.prepare_image_inputs()
__snake_case : Union[str, Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def lowercase_ ( self ):
__snake_case : int = self.get_image_processor()
__snake_case : List[str] = self.get_tokenizer()
__snake_case : Dict = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__snake_case : List[Any] = processor.batch_decode(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.get_image_processor()
__snake_case : List[Any] = self.get_tokenizer()
__snake_case : Optional[Any] = ChineseCLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__snake_case : List[Any] = 'Alexandra,T-shirt的价格是15便士。'
__snake_case : int = self.prepare_image_inputs()
__snake_case : List[str] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 679 | def UpperCAmelCase__( __UpperCAmelCase : list ):
__snake_case : List[Any] = len(__UpperCAmelCase )
for _ in range(__UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
__snake_case , __snake_case : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
__magic_name__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 679 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__magic_name__ = '''pt'''
elif is_tf_available():
__magic_name__ = '''tf'''
else:
__magic_name__ = '''jax'''
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = PerceiverTokenizer
__UpperCAmelCase = False
def lowercase_ ( self ):
super().setUp()
__snake_case : str = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowercase_ ( self ):
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def lowercase_ ( self , **_UpperCAmelCase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__snake_case : List[Any] = []
for i in range(len(_UpperCAmelCase ) ):
try:
__snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
__snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
__snake_case : List[str] = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
__snake_case : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__snake_case : List[Any] = [t[0] for t in toks]
# Ensure consistency
__snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
__snake_case : List[str] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
__snake_case : List[Any] = ' ' + output_txt
__snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def lowercase_ ( self ):
__snake_case : List[Any] = self.perceiver_tokenizer
__snake_case : Dict = 'Unicode €.'
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
__snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : int = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
__snake_case : Optional[Any] = tokenizer('e è é ê ë' )
__snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
# decoding
__snake_case : str = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.perceiver_tokenizer
__snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
__snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
if FRAMEWORK != "jax":
__snake_case : List[str] = list(batch.input_ids.numpy()[0] )
else:
__snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def lowercase_ ( self ):
__snake_case : Dict = self.perceiver_tokenizer
__snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _UpperCAmelCase )
self.assertIn('attention_mask' , _UpperCAmelCase )
self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[str] = self.perceiver_tokenizer
__snake_case : Tuple = [
'Summary of the text.',
'Another summary.',
]
__snake_case : int = tokenizer(
text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowercase_ ( self ):
# safety check on max_len default value so we are sure the test works
__snake_case : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__snake_case : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
__snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
__snake_case : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__snake_case : Tuple = tempfile.mkdtemp()
__snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__snake_case : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
__snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__snake_case : Any = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
__snake_case : Dict = added_tokens_extra_ids + [
'an_additional_special_token'
]
__snake_case : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__snake_case : Optional[Any] = tokenizer_class.from_pretrained(
_UpperCAmelCase , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
__snake_case : str = tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def lowercase_ ( self ):
__snake_case : Tuple = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '�' )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
__snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = 42
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowercase_ ( self ):
from .features import Value
return {k: Value('string' ) for k in sorted(self.languages )}
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "dict"
__UpperCAmelCase = None
__UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)
def lowercase_ ( self ):
__snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
__snake_case : Optional[Any] = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Optional[int] = set(self.languages )
if self.languages and set(_UpperCAmelCase ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__snake_case : Any = []
for lang, text in translation_dict.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def lowercase_ ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('string' ) ),
"translation": Sequence(Value('string' ) ),
}
| 679 | 1 |
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | from __future__ import annotations
__magic_name__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
__snake_case : Optional[int] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the reference grid
__snake_case : List[str] = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
] # the action grid
__snake_case : Dict = init[0]
__snake_case : List[str] = init[1]
__snake_case : Optional[Any] = 0
__snake_case : Union[str, Any] = g + heuristic[x][y] # cost from starting cell to destination cell
__snake_case : Any = [[f, g, x, y]]
__snake_case : List[str] = False # flag that is set when search is complete
__snake_case : str = False # flag set if we can't find expand
while not found and not resign:
if len(__UpperCAmelCase ) == 0:
raise ValueError('Algorithm is unable to find solution' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : List[Any] = cell.pop()
__snake_case : Optional[int] = next_cell[2]
__snake_case : int = next_cell[3]
__snake_case : Optional[Any] = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Union[str, Any] = True
else:
for i in range(len(__UpperCAmelCase ) ): # to try out different valid actions
__snake_case : Tuple = x + DIRECTIONS[i][0]
__snake_case : Tuple = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : List[str] = g + cost
__snake_case : Optional[Any] = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Any = i
__snake_case : Tuple = []
__snake_case : Dict = goal[0]
__snake_case : Optional[int] = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
__snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Tuple = xa
__snake_case : List[str] = ya
invpath.append([x, y] )
__snake_case : Dict = []
for i in range(len(__UpperCAmelCase ) ):
path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
return path, action
if __name__ == "__main__":
__magic_name__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
__magic_name__ = [0, 0]
# all coordinates are given in format [y,x]
__magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
__magic_name__ = 1
# the cost map which pushes the path closer to the goal
__magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
__magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
__magic_name__ = 99
__magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 679 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'''vocab_file''': '''vocab.json'''}
__magic_name__ = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
__magic_name__ = {'''mgp-str''': 27}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="[GO]" , _UpperCAmelCase="[GO]" , _UpperCAmelCase="[s]" , _UpperCAmelCase="[GO]" , **_UpperCAmelCase ):
super().__init__(
unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle:
__snake_case : List[str] = json.load(_UpperCAmelCase )
__snake_case : Any = {v: k for k, v in self.vocab.items()}
@property
def lowercase_ ( self ):
return len(self.vocab )
def lowercase_ ( self ):
return dict(self.vocab , **self.added_tokens_encoder )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = []
for s in text:
char_tokens.extend(_UpperCAmelCase )
return char_tokens
def lowercase_ ( self , _UpperCAmelCase ):
return self.vocab.get(_UpperCAmelCase , self.vocab.get(self.unk_token ) )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(_UpperCAmelCase ) )
return
__snake_case : Optional[int] = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' )
return (vocab_file,)
| 679 | import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_vision_model"
def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Any = intermediate_size
__snake_case : str = num_hidden_layers
__snake_case : Any = num_attention_heads
__snake_case : int = patch_size
__snake_case : Dict = image_size
__snake_case : Any = initializer_range
__snake_case : List[Any] = attention_dropout
__snake_case : Optional[Any] = layer_norm_eps
__snake_case : Optional[int] = hidden_act
__snake_case : int = qkv_bias
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : Any = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip_qformer"
def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ):
super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )
__snake_case : Union[str, Any] = vocab_size
__snake_case : List[Any] = hidden_size
__snake_case : str = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : int = intermediate_size
__snake_case : str = hidden_dropout_prob
__snake_case : Optional[Any] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = max_position_embeddings
__snake_case : Dict = initializer_range
__snake_case : Any = layer_norm_eps
__snake_case : Union[str, Any] = position_embedding_type
__snake_case : Optional[int] = cross_attention_frequency
__snake_case : Union[str, Any] = encoder_hidden_size
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
cls._set_token_in_kwargs(_UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type' ) == "instructblip":
__snake_case : List[Any] = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "instructblip"
__UpperCAmelCase = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
__snake_case : List[str] = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' )
if qformer_config is None:
__snake_case : Union[str, Any] = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' )
if text_config is None:
__snake_case : str = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
__snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase )
__snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase )
__snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
__snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
__snake_case : List[Any] = self.text_config.tie_word_embeddings
__snake_case : Optional[int] = self.text_config.is_encoder_decoder
__snake_case : List[str] = num_query_tokens
__snake_case : Tuple = self.vision_config.hidden_size
__snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
__snake_case : str = 1.0
__snake_case : Optional[int] = 0.02
@classmethod
def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Tuple = copy.deepcopy(self.__dict__ )
__snake_case : Tuple = self.vision_config.to_dict()
__snake_case : List[Any] = self.qformer_config.to_dict()
__snake_case : Optional[int] = self.text_config.to_dict()
__snake_case : List[str] = self.__class__.model_type
return output
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''PerceiverFeatureExtractor''']
__magic_name__ = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 679 | 1 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : str ):
def get_masked_lm_array(__UpperCAmelCase : str ):
__snake_case : List[Any] = F"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Dict = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : str = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_array(__UpperCAmelCase : str ):
__snake_case : Any = F"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : Optional[Any] = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : str = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_layer_array(__UpperCAmelCase : int , __UpperCAmelCase : str ):
__snake_case : Tuple = F"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : int = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
if "kernel" in name:
__snake_case : List[Any] = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
def get_encoder_attention_layer_array(__UpperCAmelCase : int , __UpperCAmelCase : str , __UpperCAmelCase : Any ):
__snake_case : Any = F"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
__snake_case : int = tf.train.load_variable(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : int = array.reshape(__UpperCAmelCase )
if "kernel" in name:
__snake_case : Optional[int] = array.transpose()
return torch.from_numpy(__UpperCAmelCase )
print(F"""Loading model based on config from {config_path}...""" )
__snake_case : int = BertConfig.from_json_file(__UpperCAmelCase )
__snake_case : Dict = BertForMaskedLM(__UpperCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : Dict = get_encoder_attention_layer_array(
__UpperCAmelCase , '_query_dense/kernel' , self_attn.query.weight.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_query_dense/bias' , self_attn.query.bias.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_key_dense/kernel' , self_attn.key.weight.data.shape )
__snake_case : Optional[int] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_key_dense/bias' , self_attn.key.bias.data.shape )
__snake_case : Optional[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_value_dense/kernel' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_value_dense/bias' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : List[Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , '_output_dense/kernel' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
__UpperCAmelCase , '_output_dense/bias' , self_output.dense.bias.data.shape )
__snake_case : List[Any] = get_encoder_layer_array(__UpperCAmelCase , '_attention_layer_norm/gamma' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_attention_layer_norm/beta' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : Any = get_encoder_layer_array(__UpperCAmelCase , '_intermediate_dense/kernel' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_intermediate_dense/bias' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : Any = get_encoder_layer_array(__UpperCAmelCase , '_output_dense/kernel' )
__snake_case : List[str] = get_encoder_layer_array(__UpperCAmelCase , '_output_dense/bias' )
__snake_case : Dict = get_encoder_layer_array(__UpperCAmelCase , '_output_layer_norm/gamma' )
__snake_case : Tuple = get_encoder_layer_array(__UpperCAmelCase , '_output_layer_norm/beta' )
# Embeddings
__snake_case : Dict = get_encoder_array('_position_embedding_layer/embeddings' )
__snake_case : Dict = get_encoder_array('_type_embedding_layer/embeddings' )
__snake_case : Dict = get_encoder_array('_embedding_norm_layer/gamma' )
__snake_case : Optional[int] = get_encoder_array('_embedding_norm_layer/beta' )
# LM Head
__snake_case : Tuple = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('dense/kernel' )
__snake_case : Optional[int] = get_masked_lm_array('dense/bias' )
__snake_case : List[Any] = get_masked_lm_array('layer_norm/gamma' )
__snake_case : int = get_masked_lm_array('layer_norm/beta' )
__snake_case : List[Any] = get_masked_lm_array('embedding_table' )
# Pooling
__snake_case : Tuple = BertPooler(config=__UpperCAmelCase )
__snake_case : BertPooler = get_encoder_array('_pooler_layer/kernel' )
__snake_case : BertPooler = get_encoder_array('_pooler_layer/bias' )
# Export final model
model.save_pretrained(__UpperCAmelCase )
# Integration test - should load without any errors ;)
__snake_case : Tuple = BertForMaskedLM.from_pretrained(__UpperCAmelCase )
print(new_model.eval() )
print('Model conversion was done sucessfully!' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
__magic_name__ = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 679 | import math
import os
import sys
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Union[str, Any] = ''
try:
with open(__UpperCAmelCase , 'rb' ) as binary_file:
__snake_case : Optional[Any] = binary_file.read()
for dat in data:
__snake_case : Tuple = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
lexicon.pop(__UpperCAmelCase )
__snake_case : Union[str, Any] = last_match_id
if math.loga(__UpperCAmelCase ).is_integer():
for curr_key in lexicon:
__snake_case : Tuple = '0' + lexicon[curr_key]
__snake_case : Any = bin(__UpperCAmelCase )[2:]
def UpperCAmelCase__( __UpperCAmelCase : str ):
__snake_case : Tuple = {'0': '0', '1': '1'}
__snake_case , __snake_case : Optional[int] = '', ''
__snake_case : str = len(__UpperCAmelCase )
for i in range(len(__UpperCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__snake_case : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
index += 1
__snake_case : Union[str, Any] = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__snake_case : Any = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = os.path.getsize(__UpperCAmelCase )
__snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
__snake_case : Any = len(__UpperCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : Tuple = 8
try:
with open(__UpperCAmelCase , 'wb' ) as opened_file:
__snake_case : int = [
to_write[i : i + byte_length]
for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
__snake_case : str = read_file_binary(__UpperCAmelCase )
__snake_case : Tuple = compress_data(__UpperCAmelCase )
__snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
write_file_binary(__UpperCAmelCase , __UpperCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 679 | 1 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : float ):
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 679 | from itertools import permutations
def UpperCAmelCase__( __UpperCAmelCase : tuple ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
__snake_case : Any = [7, 11, 13, 17]
for i, test in enumerate(__UpperCAmelCase ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
return sum(
int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
for num in permutations(range(__UpperCAmelCase ) )
if is_substring_divisible(__UpperCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 679 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''MobileViTFeatureExtractor''']
__magic_name__ = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 679 | # Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(0 , __UpperCAmelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(' ' , end='' )
for _ in range(0 , i + 1 ): # printing stars
print('* ' , end='' )
print()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
for i in range(__UpperCAmelCase , 0 , -1 ):
for _ in range(__UpperCAmelCase , 0 , -1 ): # printing stars
print('* ' , end='' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(' ' , end='' )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
if n <= 0:
print(' ... .... nothing printing :(' )
return
floyd(__UpperCAmelCase ) # upper half
reverse_floyd(__UpperCAmelCase ) # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
__magic_name__ = 1
while K:
__magic_name__ = int(input('''enter the number and , and see the magic : '''))
print()
pretty_print(user_number)
__magic_name__ = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 679 | 1 |
import math
def UpperCAmelCase__( __UpperCAmelCase : int ):
__snake_case : Dict = [True] * n
__snake_case : List[Any] = False
__snake_case : Tuple = False
__snake_case : List[str] = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
__snake_case : Dict = i * 2
while index < n:
__snake_case : Tuple = False
__snake_case : str = index + i
__snake_case : List[Any] = [2]
for i in range(3 , __UpperCAmelCase , 2 ):
if is_prime[i]:
primes.append(__UpperCAmelCase )
return primes
def UpperCAmelCase__( __UpperCAmelCase : int = 99_99_66_66_33_33 ):
__snake_case : Any = math.floor(math.sqrt(__UpperCAmelCase ) ) + 1_00
__snake_case : List[str] = prime_sieve(__UpperCAmelCase )
__snake_case : Tuple = 0
__snake_case : int = 0
__snake_case : Union[str, Any] = primes[prime_index]
while (last_prime**2) <= limit:
__snake_case : List[str] = primes[prime_index + 1]
__snake_case : List[str] = last_prime**2
__snake_case : Union[str, Any] = next_prime**2
# Get numbers divisible by lps(current)
__snake_case : Optional[int] = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
__snake_case : List[Any] = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
__snake_case : Union[str, Any] = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
__snake_case : Any = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 679 | from timeit import timeit
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Dict = 0
while number:
number &= number - 1
result += 1
return result
def UpperCAmelCase__( __UpperCAmelCase : int ):
if number < 0:
raise ValueError('the value of input must not be negative' )
__snake_case : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCAmelCase__( ):
def do_benchmark(__UpperCAmelCase : int ) -> None:
__snake_case : Optional[Any] = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
__snake_case : Dict = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=__UpperCAmelCase , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 679 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__magic_name__ = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ):
inspect_dataset(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[Any] = path + '.py'
assert script_name in os.listdir(__UpperCAmelCase )
assert "__pycache__" not in os.listdir(__UpperCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] ):
inspect_metric(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : Optional[Any] = path + '.py'
assert script_name in os.listdir(__UpperCAmelCase )
assert "__pycache__" not in os.listdir(__UpperCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
__snake_case : Dict = get_dataset_config_info(__UpperCAmelCase , config_name=__UpperCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : Any ):
with pytest.raises(__UpperCAmelCase ):
get_dataset_config_info(__UpperCAmelCase , config_name=__UpperCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Any ):
__snake_case : Any = get_dataset_config_names(__UpperCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : int ):
__snake_case : str = get_dataset_infos(__UpperCAmelCase )
assert list(infos.keys() ) == expected_configs
__snake_case : Tuple = expected_configs[0]
assert expected_config in infos
__snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int ):
__snake_case : List[Any] = get_dataset_infos(__UpperCAmelCase )
assert expected_config in infos
__snake_case : Optional[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ):
with pytest.raises(__UpperCAmelCase ):
get_dataset_split_names(__UpperCAmelCase , config_name=__UpperCAmelCase )
| 679 | import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
try:
__snake_case : Optional[int] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
__magic_name__ = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
__magic_name__ = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
__magic_name__ = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def UpperCAmelCase__( __UpperCAmelCase : Any ):
try:
import faiss # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import regex # noqa
except ImportError:
__snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
try:
import elasticsearch # noqa
except ImportError:
__snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import sqlalchemy # noqa
except ImportError:
__snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
if not config.TORCH_AVAILABLE:
__snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not config.TF_AVAILABLE:
__snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
if not config.JAX_AVAILABLE:
__snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
if not config.PIL_AVAILABLE:
__snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
def _require_spacy_model(__UpperCAmelCase : List[str] ):
try:
import spacy # noqa F401
spacy.load(__UpperCAmelCase )
except ImportError:
return unittest.skip('test requires spacy' )(__UpperCAmelCase )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
else:
return test_case
return _require_spacy_model
def UpperCAmelCase__( __UpperCAmelCase : int ):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
else:
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Any ):
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
if not _run_local_tests or _run_local_tests == 0:
__snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : int ):
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( __UpperCAmelCase : str ):
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
return test_case
def UpperCAmelCase__( *__UpperCAmelCase : Any ):
def decorate(cls : List[str] ):
for name, fn in cls.__dict__.items():
if callable(__UpperCAmelCase ) and name.startswith('test' ):
for decorator in decorators:
__snake_case : Optional[Any] = decorator(__UpperCAmelCase )
setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
return cls
return decorate
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
pass
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
__snake_case : Optional[Any] = requests.Session().request
def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case : int = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
__snake_case : str = timeout
try:
return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case : Any = url
__snake_case : Union[str, Any] = e.args[0]
__snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
__snake_case : str = (max_retry_error,)
raise
def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , __UpperCAmelCase ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
__snake_case : Dict = str(Path().resolve() )
with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
try:
os.chdir(__UpperCAmelCase )
yield
finally:
os.chdir(__UpperCAmelCase )
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : Any = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCAmelCase__( ):
import gc
gc.collect()
__snake_case : int = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
import decorator
from requests.exceptions import HTTPError
def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
try:
return func(*__UpperCAmelCase , **__UpperCAmelCase )
except HTTPError as err:
if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
pytest.xfail(str(__UpperCAmelCase ) )
raise err
return decorator.decorator(_wrapper , __UpperCAmelCase )
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = returncode
__snake_case : Tuple = stdout
__snake_case : List[Any] = stderr
async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
while True:
__snake_case : Optional[int] = await stream.readline()
if line:
callback(__UpperCAmelCase )
else:
break
async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
if echo:
print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )
__snake_case : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case : Any = []
__snake_case : Tuple = []
def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
__snake_case : int = line.decode('utf-8' ).rstrip()
sink.append(__UpperCAmelCase )
if not quiet:
print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
] , timeout=__UpperCAmelCase , )
return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
__snake_case : Any = asyncio.get_event_loop()
__snake_case : List[str] = loop.run_until_complete(
_stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )
__snake_case : Dict = ' '.join(__UpperCAmelCase )
if result.returncode > 0:
__snake_case : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def UpperCAmelCase__( ):
__snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
return int(__UpperCAmelCase )
def UpperCAmelCase__( ):
__snake_case : Dict = 2_95_00
__snake_case : Optional[int] = pytest_xdist_worker_id()
return port + uniq_delta
| 679 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__magic_name__ = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__magic_name__ = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__magic_name__ = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def lowercase_ ( self ):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , ):
__snake_case : List[Any] = len(references[0] )
if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ):
raise ValueError('Sacrebleu requires the same number of references for each prediction' )
__snake_case : Optional[int] = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )]
__snake_case : Union[str, Any] = TER(
normalized=_UpperCAmelCase , no_punct=_UpperCAmelCase , asian_support=_UpperCAmelCase , case_sensitive=_UpperCAmelCase , )
__snake_case : Union[str, Any] = sb_ter.corpus_score(_UpperCAmelCase , _UpperCAmelCase )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 679 | from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__magic_name__ = TypeVar('''T''')
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
__snake_case : Optional[Any] = data
__snake_case : Node[T] | None = None
def __str__( self ):
return F"""{self.data}"""
class __SCREAMING_SNAKE_CASE ( Generic[T]):
"""simple docstring"""
def __init__( self ):
__snake_case : Node[T] | None = None
def __iter__( self ):
__snake_case : List[str] = self.top
while node:
yield node.data
__snake_case : Union[str, Any] = node.next
def __str__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __len__( self ):
return len(tuple(iter(self ) ) )
def lowercase_ ( self ):
return self.top is None
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Any = Node(_UpperCAmelCase )
if not self.is_empty():
__snake_case : Any = self.top
__snake_case : Dict = node
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('pop from empty stack' )
assert isinstance(self.top , _UpperCAmelCase )
__snake_case : Optional[int] = self.top
__snake_case : Dict = self.top.next
return pop_node.data
def lowercase_ ( self ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def lowercase_ ( self ):
__snake_case : Optional[int] = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| 679 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
__snake_case : Tuple = inspect.getfile(accelerate.test_utils )
__snake_case : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__snake_case : str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowercase_ ( self ):
__snake_case : Tuple = F"""
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
""".split()
__snake_case : List[Any] = [sys.executable] + distributed_args
execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() )
| 679 | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = ShapEPipeline
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = ["prompt"]
__UpperCAmelCase = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__UpperCAmelCase = False
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return 32
@property
def lowercase_ ( self ):
return self.time_input_dim * 4
@property
def lowercase_ ( self ):
return 8
@property
def lowercase_ ( self ):
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Any = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowercase_ ( self ):
torch.manual_seed(0 )
__snake_case : Tuple = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
return model
def lowercase_ ( self ):
__snake_case : Tuple = self.dummy_prior
__snake_case : Dict = self.dummy_text_encoder
__snake_case : Optional[int] = self.dummy_tokenizer
__snake_case : str = self.dummy_renderer
__snake_case : Tuple = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
__snake_case : Optional[int] = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_UpperCAmelCase ).startswith('mps' ):
__snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
else:
__snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
__snake_case : Tuple = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowercase_ ( self ):
__snake_case : Optional[int] = 'cpu'
__snake_case : Tuple = self.get_dummy_components()
__snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Any = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
__snake_case : Union[str, Any] = output.images[0]
__snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowercase_ ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowercase_ ( self ):
__snake_case : List[str] = torch_device == 'cpu'
__snake_case : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowercase_ ( self ):
__snake_case : Dict = self.get_dummy_components()
__snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
__snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : int = 1
__snake_case : Optional[int] = 2
__snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Union[str, Any] = batch_size * [inputs[key]]
__snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def lowercase_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
__snake_case : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
__snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
__snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
__snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
__snake_case : Optional[Any] = pipe(
'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 679 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=18 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , ):
__snake_case : List[str] = size if size is not None else {'shortest_edge': 18}
__snake_case : Optional[int] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
__snake_case : Optional[int] = parent
__snake_case : int = batch_size
__snake_case : Optional[Any] = num_channels
__snake_case : Optional[Any] = image_size
__snake_case : int = min_resolution
__snake_case : Tuple = max_resolution
__snake_case : Optional[Any] = do_resize
__snake_case : List[Any] = size
__snake_case : Optional[Any] = do_center_crop
__snake_case : Optional[int] = crop_size
__snake_case : str = do_normalize
__snake_case : Tuple = image_mean
__snake_case : Optional[Any] = image_std
def lowercase_ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = LevitImageProcessor if is_vision_available() else None
def lowercase_ ( self ):
__snake_case : List[Any] = LevitImageProcessingTester(self )
@property
def lowercase_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
def lowercase_ ( self ):
__snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowercase_ ( self ):
pass
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
__snake_case : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
__snake_case : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : List[Any] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowercase_ ( self ):
# Initialize image_processing
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
__snake_case : Optional[int] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 679 | import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
# Initialise PyTorch model
__snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
__snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 679 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "speech_to_text"
__UpperCAmelCase = ["past_key_values"]
__UpperCAmelCase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _UpperCAmelCase=10_000 , _UpperCAmelCase=12 , _UpperCAmelCase=2_048 , _UpperCAmelCase=4 , _UpperCAmelCase=6 , _UpperCAmelCase=2_048 , _UpperCAmelCase=4 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=2 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=6_000 , _UpperCAmelCase=1_024 , _UpperCAmelCase=2 , _UpperCAmelCase=(5, 5) , _UpperCAmelCase=1_024 , _UpperCAmelCase=80 , _UpperCAmelCase=1 , **_UpperCAmelCase , ):
__snake_case : Any = vocab_size
__snake_case : Optional[Any] = d_model
__snake_case : Union[str, Any] = encoder_ffn_dim
__snake_case : List[str] = encoder_layers
__snake_case : int = encoder_attention_heads
__snake_case : Dict = decoder_ffn_dim
__snake_case : str = decoder_layers
__snake_case : List[Any] = decoder_attention_heads
__snake_case : Tuple = dropout
__snake_case : Optional[int] = attention_dropout
__snake_case : List[str] = activation_dropout
__snake_case : int = activation_function
__snake_case : Union[str, Any] = init_std
__snake_case : str = encoder_layerdrop
__snake_case : List[str] = decoder_layerdrop
__snake_case : Union[str, Any] = use_cache
__snake_case : Any = encoder_layers
__snake_case : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case : Dict = max_source_positions
__snake_case : int = max_target_positions
__snake_case : str = num_conv_layers
__snake_case : Any = list(_UpperCAmelCase )
__snake_case : Optional[Any] = conv_channels
__snake_case : Dict = input_feat_per_channel
__snake_case : Tuple = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
| 679 | import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
__magic_name__ = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
__snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
__snake_case : Optional[Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
self.init_weights()
__snake_case : str = 0
__snake_case : List[str] = 0
__snake_case : int = 0
__snake_case : Tuple = 0
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Dict = threshold
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : List[Any] = patience
def lowercase_ ( self ):
__snake_case : Dict = 0
__snake_case : Dict = 0
def lowercase_ ( self ):
__snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
__snake_case : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(_UpperCAmelCase )
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
__snake_case : Union[str, Any] = input_ids.size()
elif inputs_embeds is not None:
__snake_case : int = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
__snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
if token_type_ids is None:
__snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
__snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
__snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
else:
__snake_case : str = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )
__snake_case : Any = self.embeddings(
input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
__snake_case : List[str] = embedding_output
if self.training:
__snake_case : Dict = []
for i in range(self.config.num_hidden_layers ):
__snake_case : str = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
__snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
res.append(_UpperCAmelCase )
elif self.patience == 0: # Use all layers for inference
__snake_case : Dict = self.encoder(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
__snake_case : str = self.pooler(encoder_outputs[0] )
__snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
else:
__snake_case : List[str] = 0
__snake_case : str = None
__snake_case : Tuple = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case : List[Any] = self.encoder.adaptive_forward(
_UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case : Any = self.pooler(_UpperCAmelCase )
__snake_case : int = output_layers[i](_UpperCAmelCase )
if regression:
__snake_case : Optional[int] = logits.detach()
if patient_result is not None:
__snake_case : Dict = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case : Any = 0
else:
__snake_case : str = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case : List[str] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
patient_counter += 1
else:
__snake_case : Dict = 0
__snake_case : str = logits
if patient_counter == self.patience:
break
__snake_case : str = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase , )
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__snake_case : List[str] = config.num_labels
__snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
__snake_case : int = nn.Dropout(config.hidden_dropout_prob )
__snake_case : Optional[int] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
__snake_case : List[str] = self.bert(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case : int = (logits[-1],)
if labels is not None:
__snake_case : List[Any] = None
__snake_case : Optional[int] = 0
for ix, logits_item in enumerate(_UpperCAmelCase ):
if self.num_labels == 1:
# We are doing regression
__snake_case : List[str] = MSELoss()
__snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case : List[str] = CrossEntropyLoss()
__snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case : List[Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case : int = (total_loss / total_weights,) + outputs
return outputs
| 679 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = "umt5"
__UpperCAmelCase = ["past_key_values"]
def __init__( self , _UpperCAmelCase=250_112 , _UpperCAmelCase=512 , _UpperCAmelCase=64 , _UpperCAmelCase=1_024 , _UpperCAmelCase=8 , _UpperCAmelCase=None , _UpperCAmelCase=6 , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1E-6 , _UpperCAmelCase=1.0 , _UpperCAmelCase="gated-gelu" , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="T5Tokenizer" , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(
is_encoder_decoder=_UpperCAmelCase , tokenizer_class=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__snake_case : Optional[int] = vocab_size
__snake_case : Any = d_model
__snake_case : Optional[Any] = d_kv
__snake_case : int = d_ff
__snake_case : Tuple = num_layers
__snake_case : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__snake_case : Optional[int] = num_heads
__snake_case : str = relative_attention_num_buckets
__snake_case : List[Any] = relative_attention_max_distance
__snake_case : Dict = dropout_rate
__snake_case : List[Any] = layer_norm_epsilon
__snake_case : Optional[Any] = initializer_factor
__snake_case : str = feed_forward_proj
__snake_case : Optional[Any] = use_cache
__snake_case : Tuple = self.feed_forward_proj.split('-' )
__snake_case : Dict = act_info[-1]
__snake_case : Dict = act_info[0] == 'gated'
if len(_UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(_UpperCAmelCase ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
if feed_forward_proj == "gated-gelu":
__snake_case : Union[str, Any] = 'gelu_new'
@property
def lowercase_ ( self ):
return self.d_model
@property
def lowercase_ ( self ):
return self.num_heads
@property
def lowercase_ ( self ):
return self.num_layers
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def lowercase_ ( self ):
__snake_case : Dict = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
__snake_case : List[str] = 'past_encoder_sequence + sequence'
__snake_case : Union[str, Any] = {0: 'batch'}
__snake_case : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__snake_case : List[str] = {0: 'batch', 1: 'decoder_sequence'}
__snake_case : Any = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='inputs' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def lowercase_ ( self ):
return 13
@property
def lowercase_ ( self ):
return 5E-4
| 679 | def UpperCAmelCase__( __UpperCAmelCase : str ):
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__snake_case : str = sorted(string.lower() )
return len(__UpperCAmelCase ) == len(set(__UpperCAmelCase ) )
if __name__ == "__main__":
__magic_name__ = input('''Enter a string ''').strip()
__magic_name__ = is_isogram(input_str)
print(F'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 679 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.