Dataset schema (one row per sample; the bare numeric lines between the code rows below are the per-row column values):

  column                    type     value range
  code                      string   length 87 - 55.2k
  code_codestyle            int64    0 - 349
  style_context             string   length 135 - 49.1k
  style_context_codestyle   int64    0 - 349
  label                     int64    0 - 1
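For orientation, a minimal sketch of loading and inspecting rows with this schema via the Hugging Face `datasets` library; the dataset path "example/code-style-pairs" is a placeholder, not the real identifier:

# Sketch only: assumes the rows below come from a Hugging Face dataset
# with the schema above; "example/code-style-pairs" is a placeholder name.
from datasets import load_dataset

ds = load_dataset("example/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]))                # string, 87 to 55.2k characters
print(row["code_codestyle"])           # style id in [0, 349]
print(len(row["style_context"]))       # string, 135 to 49.1k characters
print(row["style_context_codestyle"])  # style id in [0, 349]
print(row["label"])                    # binary label, 0 or 1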
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
320
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
320
"""simple docstring""" def A_ ( ): """simple docstring""" _a = [] _a = 1 while len(_lowerCAmelCase ) < 1e6: constant.append(str(_lowerCAmelCase ) ) i += 1 _a = ''''''.join(_lowerCAmelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 50 ): """simple docstring""" _a = [1] * (length + 1) for row_length in range(3, length + 1 ): for block_length in range(3, row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
320
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = 'bart' A_ : Optional[Any] = ['past_key_values'] A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: _a = vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = classifier_dropout _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): _a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a = {0: '''batch'''} _a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''decoder_sequence'''} _a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__UpperCAmelCase , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape _a = common_inputs['''decoder_input_ids'''].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__UpperCAmelCase , __UpperCAmelCase ) _a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers _a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs['''attention_mask'''].dtype _a = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": _a = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.task in ["default", "seq2seq-lm"]: _a = 
super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: _a = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
320
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __snake_case = False class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _UpperCAmelCase ( self ) -> Any: return 12 @property def _UpperCAmelCase ( self ) -> int: return 12 @property def _UpperCAmelCase ( self ) -> Optional[int]: return 32 @property def _UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) _a = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _UpperCAmelCase ( self ) -> List[Any]: _a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def _UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(__UpperCAmelCase ) @property def _UpperCAmelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _a = 12 _a = 12 _a = { '''attention_bias''': True, '''cross_attention_dim''': 32, '''attention_head_dim''': height * width, '''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 32, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } _a = TransformeraDModel(**__UpperCAmelCase ) return model def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = '''cpu''' _a = self.dummy_vqvae _a = self.dummy_text_encoder _a = self.dummy_tokenizer _a = self.dummy_transformer _a = VQDiffusionScheduler(self.num_embed ) _a = LearnedClassifierFreeSamplingEmbeddings(learnable=__UpperCAmelCase ) _a = VQDiffusionPipeline( vqvae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , transformer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) _a = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _a = '''teddy bear playing in the pool''' _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _a = pipe([prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' ) _a = output.images _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _a = pipe( [prompt] , generator=__UpperCAmelCase , output_type='''np''' , return_dict=__UpperCAmelCase , num_inference_steps=2 )[0] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) _a = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> Tuple: _a = '''cpu''' _a = self.dummy_vqvae _a = self.dummy_text_encoder _a = self.dummy_tokenizer _a = self.dummy_transformer _a = VQDiffusionScheduler(self.num_embed ) _a = LearnedClassifierFreeSamplingEmbeddings( learnable=__UpperCAmelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) _a = VQDiffusionPipeline( vqvae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , transformer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , ) _a = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _a = '''teddy bear playing in the pool''' _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _a = pipe([prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''np''' ) _a = output.images _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _a = pipe( [prompt] , generator=__UpperCAmelCase , output_type='''np''' , return_dict=__UpperCAmelCase , num_inference_steps=2 )[0] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) _a = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> List[str]: _a = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) _a = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) _a = pipeline.to(__UpperCAmelCase ) pipeline.set_progress_bar_config(disable=__UpperCAmelCase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _a = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=__UpperCAmelCase , output_type='''np''' , ) _a = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
320
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def A_ ( _lowerCAmelCase : str ): """simple docstring""" for char in word: _a = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = set() for token in tokens: _a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _a = list(_lowerCAmelCase ) return word_list def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens _a = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _a = bert_tokens _a , _a = 0, len(_lowerCAmelCase ) while start < end: _a = True if is_chinese(bert_word[start] ): _a = min(end - start, _lowerCAmelCase ) for i in range(_lowerCAmelCase, 1, -1 ): _a = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _a = '''##''' + bert_word[j] _a = start + i _a = False break if single_word: start += 1 return bert_word def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ): """simple docstring""" _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws _a = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ): _a = [] for id in input_ids: _a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase ) _a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _a = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def A_ ( _lowerCAmelCase : Any ): """simple docstring""" with open(args.file_name, '''r''', encoding='''utf-8''' ) as f: _a = f.readlines() _a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _a = LTP(args.ltp ) # faster in GPU device _a = BertTokenizer.from_pretrained(args.bert ) _a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) with open(args.save_path, '''w''', encoding='''utf-8''' ) as f: _a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __snake_case = parser.parse_args() main(args)
320
1
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING __snake_case = logging.get_logger(__name__) @add_end_docstrings(a__ ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) self.check_model_type(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> int: _a , _a = {}, {} if padding is not None: _a = padding if truncation is not None: _a = truncation if top_k is not None: _a = top_k return preprocess_params, {}, postprocess_params def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ) -> Dict: if isinstance(__UpperCAmelCase , (Image.Image, str) ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = {'''image''': image, '''question''': question} else: _a = image _a = super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) return results def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Tuple: _a = load_image(inputs['''image'''] ) _a = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=__UpperCAmelCase , truncation=__UpperCAmelCase ) _a = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework ) model_inputs.update(__UpperCAmelCase ) return model_inputs def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: _a = self.model(**__UpperCAmelCase ) return model_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=5 ) -> Optional[Any]: if top_k > self.model.config.num_labels: _a = self.model.config.num_labels if self.framework == "pt": _a = model_outputs.logits.sigmoid()[0] _a , _a = probs.topk(__UpperCAmelCase ) else: raise ValueError(F'Unsupported framework: {self.framework}' ) _a = scores.tolist() _a = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCAmelCase , __UpperCAmelCase )]
320
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
320
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Dict ): """simple docstring""" _a = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase ) _a = downstream_dict['''projector.weight'''] _a = downstream_dict['''projector.bias'''] _a = downstream_dict['''model.post_net.linear.weight'''] _a = downstream_dict['''model.post_net.linear.bias'''] return model def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Any, _lowerCAmelCase : List[Any] ): """simple docstring""" _a = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase ) _a = downstream_dict['''model.linear.weight'''] _a = downstream_dict['''model.linear.bias'''] return model def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" _a = WavaVecaForXVector.from_pretrained(_lowerCAmelCase, config=_lowerCAmelCase ) _a = downstream_dict['''connector.weight'''] _a = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _a = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] _a = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] _a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] _a = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] _a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] _a = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] _a = downstream_dict['''objective.W'''] return model @torch.no_grad() def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : int, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Union[str, Any] ): """simple docstring""" _a = torch.load(_lowerCAmelCase, map_location='''cpu''' ) _a = checkpoint['''Downstream'''] _a = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) _a = WavaVecaFeatureExtractor.from_pretrained( _lowerCAmelCase, return_attention_mask=_lowerCAmelCase, do_normalize=_lowerCAmelCase ) _a = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): _a = convert_classification(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): _a = convert_diarization(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) elif arch.endswith('''ForXVector''' ): _a = convert_xvector(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: _a = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(_lowerCAmelCase ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', 
default=None, type=str, help='''Path to the final converted model.''') __snake_case = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
320
"""simple docstring""" import os import sys import unittest __snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = {'''BertModelTest''': '''BertModelTester'''} _a = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
320
1
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __snake_case = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowerCamelCase ( nn.Module ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Optional[int]: super().__init__() _a = torchvision.models.resnetaaa(pretrained=__UpperCAmelCase ) _a = list(model.children() )[:-2] _a = nn.Sequential(*__UpperCAmelCase ) _a = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 _a = self.pool(self.model(__UpperCAmelCase ) ) _a = torch.flatten(__UpperCAmelCase , start_dim=2 ) _a = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = [json.loads(__UpperCAmelCase ) for l in open(__UpperCAmelCase )] _a = os.path.dirname(__UpperCAmelCase ) _a = tokenizer _a = labels _a = len(__UpperCAmelCase ) _a = max_seq_length _a = transforms def __len__( self ) -> Tuple: return len(self.data ) def __getitem__( self , __UpperCAmelCase ) -> Optional[int]: _a = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=__UpperCAmelCase ) ) _a , _a , _a = sentence[0], sentence[1:-1], sentence[-1] _a = sentence[: self.max_seq_length] _a = torch.zeros(self.n_classes ) _a = 1 _a = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) _a = self.transforms(__UpperCAmelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _UpperCAmelCase ( self ) -> List[str]: _a = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def A_ ( _lowerCAmelCase : str ): """simple docstring""" _a = [len(row['''sentence'''] ) for row in batch] _a , _a = len(_lowerCAmelCase ), max(_lowerCAmelCase ) _a = torch.zeros(_lowerCAmelCase, _lowerCAmelCase, dtype=torch.long ) _a = torch.zeros(_lowerCAmelCase, _lowerCAmelCase, dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_lowerCAmelCase, _lowerCAmelCase ) ): _a = input_row['''sentence'''] _a = 1 _a = torch.stack([row['''image'''] for row in batch] ) _a = torch.stack([row['''label'''] for row in batch] ) _a = torch.stack([row['''image_start_token'''] for row in batch] ) _a = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def A_ ( ): """simple docstring""" return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def A_ ( ): """simple docstring""" return transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7], std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9], ), ] )
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
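The helpers at the top of this test compress large boolean masks into short fingerprints so the expected values stay readable. Below is a minimal, self-contained sketch of those helpers, assuming the intended hash is MD5 (`hashlib.mda` in the snippet reads as a typo for `hashlib.md5`); it needs only numpy and PIL, not the SAM checkpoint.

import hashlib

import numpy as np
from PIL import Image


def hashimage(image: Image.Image) -> str:
    # Fingerprint the raw pixel bytes; ten hex chars are plenty for a test id.
    return hashlib.md5(image.tobytes()).hexdigest()[:10]  # assumes MD5 is intended


def mask_to_test_readable(mask: Image.Image) -> dict:
    # Reduce a mask to a comparable summary: content hash plus array shape.
    npimg = np.array(mask)
    return {"hash": hashimage(mask), "shape": npimg.shape}


# A synthetic 480x640 mask, matching the shapes asserted in the test above.
mask = Image.fromarray(np.zeros((480, 640), dtype=np.uint8))
print(mask_to_test_readable(mask))  # {'hash': '...', 'shape': (480, 640)}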
320
1
"""simple docstring""" import re def A_ ( _lowerCAmelCase : str ): """simple docstring""" if len(re.findall('''[ATCG]''', _lowerCAmelCase ) ) != len(_lowerCAmelCase ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: _a = parent _a = batch_size _a = encoder_seq_length _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = d_ff _a = relative_attention_num_buckets _a = dropout_rate _a = initializer_factor _a = eos_token_id _a = pad_token_id _a = decoder_start_token_id _a = None _a = decoder_layers def _UpperCAmelCase ( self ) -> Dict: return TaConfig.from_pretrained('''google/umt5-base''' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: _a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: _a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _UpperCAmelCase ( self ) -> Tuple: _a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = config.num_attention_heads _a = self.prepare_inputs_dict(__UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def _UpperCAmelCase ( self ) -> int: _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCAmelCase ( self ) -> Tuple: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self ) -> List[str]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) _a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) _a = result.last_hidden_state _a = result.past_key_values _a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(__UpperCAmelCase )['''last_hidden_state'''] _a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -1, random_slice_idx].detach() _a 
= output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() _a = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ : str = True A_ : List[str] = False A_ : List[Any] = False A_ : str = True A_ : List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ : Optional[Any] = [0.8, 0.9] def _UpperCAmelCase ( self ) -> Tuple: _a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() _a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _a = self.model_tester.prepare_config_and_inputs() _a = config_and_inputs[0] _a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) _a = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): _a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) _a = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we 
keep hitting edge cases.''' ) def _UpperCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) _a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) _a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off _a = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) _a = model.generate(input_ids.to(__UpperCAmelCase ) ) _a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _a = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
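The core invariant exercised by the cached-decoding check in the tester above is that decoding with `past_key_values` must reproduce full-sequence decoding. A hedged, standalone sketch of the same check using a tiny randomly initialized T5 decoder stack (UMT5 shares this interface; all config sizes here are arbitrary):

import torch
from transformers import T5Config, T5Model

# Tiny decoder so the check runs in milliseconds; sizes are assumptions.
config = T5Config(
    vocab_size=100, d_model=32, d_kv=8, d_ff=37, num_layers=2, num_heads=4,
    decoder_start_token_id=0,
)
decoder = T5Model(config).get_decoder().eval()

input_ids = torch.randint(1, 100, (2, 5))
next_token = torch.randint(1, 100, (2, 1))

with torch.no_grad():
    out = decoder(input_ids, use_cache=True)
    # Full pass over the extended sequence...
    full = decoder(torch.cat([input_ids, next_token], dim=-1)).last_hidden_state
    # ...must agree with feeding only the new token plus the cache.
    cached = decoder(next_token, past_key_values=out.past_key_values).last_hidden_state

assert torch.allclose(full[:, -1], cached[:, 0], atol=1e-3)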
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str ): """simple docstring""" if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError('''String lengths must match!''' ) _a = 0 for chara, chara in zip(_lowerCAmelCase, _lowerCAmelCase ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
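Both graph classes above traverse iteratively over a `{u: [[w, v], ...]}` adjacency dict. Since the obfuscation gives the two classes the same placeholder name, here is a self-contained sketch of the same iterative DFS/BFS idea on that edge format (the graph contents are made up):

from collections import deque

graph = {0: [[1, 1], [1, 2]], 1: [[1, 2]], 2: [[1, 0], [1, 3]], 3: [[1, 3]]}


def dfs(graph, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # Each edge entry is [weight, target]; push the targets.
            stack.extend(edge[1] for edge in graph.get(node, []))
    return visited


def bfs(graph, start):
    visited, queue = [], deque([start])
    while queue:
        node = queue.popleft()
        if node not in visited:
            visited.append(node)
            queue.extend(edge[1] for edge in graph.get(node, []))
    return visited


print(dfs(graph, 0))  # e.g. [0, 2, 3, 1]
print(bfs(graph, 0))  # [0, 1, 2, 3]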
320
1
"""simple docstring""" import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def A_ ( _lowerCAmelCase : List[Any] ): """simple docstring""" def wrapper(*_lowerCAmelCase : Union[str, Any], **_lowerCAmelCase : Any ): _a = timeit.default_timer() _a = func(*_lowerCAmelCase, **_lowerCAmelCase ) _a = timeit.default_timer() - starttime return delta _a = func.__name__ return wrapper def A_ ( _lowerCAmelCase : dict, _lowerCAmelCase : Any=1_00, _lowerCAmelCase : List[str]=None ): """simple docstring""" _a = [] _a = seq_shapes or {} for i in range(_lowerCAmelCase ): _a = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(_lowerCAmelCase, _ArrayXD ): _a = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(_lowerCAmelCase, datasets.Value ): if v.dtype == "string": _a = '''The small grey turtle was surprisingly fast when challenged.''' else: _a = np.random.randint(10, size=1 ).astype(v.dtype ).item() elif isinstance(_lowerCAmelCase, datasets.Sequence ): while isinstance(_lowerCAmelCase, datasets.Sequence ): _a = v.feature _a = seq_shapes[k] _a = np.random.rand(*_lowerCAmelCase ).astype(v.dtype ) _a = data dummy_data.append((i, example) ) return dummy_data def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any]=1_00, _lowerCAmelCase : Any=None ): """simple docstring""" _a = generate_examples(_lowerCAmelCase, num_examples=_lowerCAmelCase, seq_shapes=_lowerCAmelCase ) with ArrowWriter(features=_lowerCAmelCase, path=_lowerCAmelCase ) as writer: for key, record in dummy_data: _a = features.encode_example(_lowerCAmelCase ) writer.write(_lowerCAmelCase ) _a , _a = writer.finalize() if not num_final_examples == num_examples: raise ValueError( f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' ) _a = datasets.Dataset.from_file(filename=_lowerCAmelCase, info=datasets.DatasetInfo(features=_lowerCAmelCase ) ) return dataset
320
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
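The closing property above multiplies the convolution strides together, giving how many raw waveform samples collapse into one encoder frame (in transformers this property is named `inputs_to_logits_ratio`). With the default strides it evaluates to 320:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default in the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320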
320
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['keras_nlp'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''keras_nlp'''] )
320
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } __snake_case = { '''google/rembert''': 256, } __snake_case = '''▁''' class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : List[Any] = RemBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = False if not self.vocab_file else True def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) ) return _a = os.path.join( 
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
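The three token-handling methods above encode RemBERT's BERT-style layout: `[CLS] A [SEP]` for one sequence, `[CLS] A [SEP] B [SEP]` for a pair, with segment ids 0 for the first segment and 1 for the second. A standalone sketch with made-up token ids:

cls, sep = [101], [102]  # illustrative ids; real values come from the vocab
ids_a, ids_b = [7, 8, 9], [11, 12]

single = cls + ids_a + sep
pair = cls + ids_a + sep + ids_b + sep
token_type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]

print(single)          # [101, 7, 8, 9, 102]
print(pair)            # [101, 7, 8, 9, 102, 11, 12, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]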
320
1
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Tuple = ['image_processor', 'tokenizer'] A_ : Union[str, Any] = 'AutoImageProcessor' A_ : Optional[Any] = 'AutoTokenizer' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) _a = self.image_processor def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Tuple: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: _a = self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _UpperCAmelCase ( self ) -> Any: return ["input_ids", "attention_mask", "pixel_values"]
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : list, _lowerCAmelCase : list, _lowerCAmelCase : int ): """simple docstring""" if len(_lowerCAmelCase ) != len(_lowerCAmelCase ): raise ValueError('''The length of profit and weight must be same.''' ) if max_weight <= 0: raise ValueError('''max_weight must greater than zero.''' ) if any(p < 0 for p in profit ): raise ValueError('''Profit can not be negative.''' ) if any(w < 0 for w in weight ): raise ValueError('''Weight can not be negative.''' ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. _a = [p / w for p, w in zip(_lowerCAmelCase, _lowerCAmelCase )] # Creating a copy of the list and sorting profit/weight in ascending order _a = sorted(_lowerCAmelCase ) # declaring useful variables _a = len(_lowerCAmelCase ) _a = 0 _a = 0 _a = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight _a = sorted_profit_by_weight[length - i - 1] _a = profit_by_weight.index(_lowerCAmelCase ) _a = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( '''Input profits, weights, and then max_weight (all positive ints) separated by ''' '''spaces.''' ) __snake_case = [int(x) for x in input('''Input profits separated by spaces: ''').split()] __snake_case = [int(x) for x in input('''Input weights separated by spaces: ''').split()] __snake_case = int(input('''Max weight allowed: ''')) # Function Call calc_profit(profit, weight, max_weight)
320
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCamelCase ( a__ ): '''simple docstring''' @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers 
import BertConfig, BertModel, BertTokenizer ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Tuple: _a = ''' from transformers import pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' _a = self.get_env() _a = '''1''' _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: _a = ''' from transformers import AutoModel ''' _a = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
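A hedged sketch of the offline pattern these tests exercise: with TRANSFORMERS_OFFLINE=1 exported before transformers is imported, cached checkpoints still load while network access is refused. This sketch assumes the checkpoint is already in the local cache:

import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"  # must be set before transformers is imported

from transformers import BertConfig

# Succeeds only when the files are already in the local cache.
config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
print(config.model_type)  # bert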
320
1
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
320
1
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def A_ ( ): """simple docstring""" _a , _a = 9, 14 # noqa: F841 _a = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _a = defaultdict(_lowerCAmelCase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _a = mst(_lowerCAmelCase ) _a = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _a = tuple(answer[:2] ) _a = tuple(edge[::-1] ) assert edge in result or reverse in result
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __snake_case = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __snake_case = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' __snake_case = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" def remove_articles(_lowerCAmelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE ) return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase : Tuple ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase : Tuple ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase : List[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ): """simple docstring""" _a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00 def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ): """simple docstring""" _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(_lowerCAmelCase ) _a = Counter(_lowerCAmelCase ) _a = Counter() for sgram, scount in sgramcounter.items(): _a = scount * numref _a = Counter(_lowerCAmelCase ) _a = Counter() for cgram, ccount in 
cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(_lowerCAmelCase ) > 0: _a = deltmpscorea / len(_lowerCAmelCase ) # ADDITION _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ): """simple docstring""" _a = len(_lowerCAmelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ): """simple docstring""" if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase ) else: _a 
= sentence if not return_str: _a = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ): """simple docstring""" if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] ) _a = sari_score / len(_lowerCAmelCase ) return 1_00 * sari_score def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ): """simple docstring""" _a = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] _a = sacrebleu.corpus_bleu( _lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = {} result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) return result
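# Usage sketch for the metric above, reproducing the example from its own
# docstring. `datasets.load_metric` was the entry point for metric scripts
# like this one; it is deprecated in recent `datasets` releases in favour of
# the separate `evaluate` library.
import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}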
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = [] for line in lines: _a = re.sub(R'''#.*''', '''''', _lowerCAmelCase ) # remove comments if line: filtered_lines.append(_lowerCAmelCase ) _a = '''\n'''.join(_lowerCAmelCase ) # Make a hash from all this code _a = full_str.encode('''utf-8''' ) return shaaaa(_lowerCAmelCase ).hexdigest() # get importable module names and hash for caching __snake_case = { '''csv''': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), '''json''': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), '''pandas''': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), '''parquet''': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), '''arrow''': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), '''text''': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), '''imagefolder''': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), '''audiofolder''': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions __snake_case = { '''.csv''': ('''csv''', {}), '''.tsv''': ('''csv''', {'''sep''': '''\t'''}), '''.json''': ('''json''', {}), '''.jsonl''': ('''json''', {}), '''.parquet''': ('''parquet''', {}), '''.arrow''': ('''arrow''', {}), '''.txt''': ('''text''', {}), } _EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) __snake_case = {'''imagefolder''', '''audiofolder'''} # Used to filter data files based on extensions given a module name __snake_case = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''') _MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 50 ): """simple docstring""" _a = [1] * (length + 1) for row_length in range(3, length + 1 ): for block_length in range(3, row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __snake_case = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __snake_case = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' __snake_case = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Dict: if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[ '''https://github.com/m-popovic/chrF''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = CHRF.CHAR_ORDER , __UpperCAmelCase = CHRF.WORD_ORDER , __UpperCAmelCase = CHRF.BETA , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , ) -> Optional[int]: _a = len(references[0] ) if any(len(__UpperCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(__UpperCAmelCase )] _a = CHRF(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = sb_chrf.corpus_score(__UpperCAmelCase , __UpperCAmelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
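# The wrapper above delegates to sacrebleu's CHRF; a minimal direct-usage
# sketch (note the transposition caveat from the description: sacrebleu wants
# one reference *stream* per inner list, not one list per prediction):
from sacrebleu import CHRF

predictions = ["The cat sat on the mat.", "Hello there."]
references = [["The cat sat on a mat."], ["Hello there!"]]

# transpose [[refs] per prediction] -> [stream of i-th references]
ref_streams = [[refs[i] for refs in references] for i in range(len(references[0]))]

chrf = CHRF(word_order=2)  # word_order=2 gives chrF++
score = chrf.corpus_score(predictions, ref_streams)
print(score.score)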
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __snake_case = logging.get_logger('''transformers.models.speecht5''') __snake_case = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } __snake_case = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } __snake_case = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } __snake_case = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } __snake_case = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } __snake_case = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } __snake_case = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', 
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } __snake_case = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __snake_case = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = [] __snake_case = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ): """simple docstring""" for attribute in key.split('''.''' ): _a = getattr(_lowerCAmelCase, _lowerCAmelCase ) if weight_type is not None: _a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + 
"." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value else: _a = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ): """simple docstring""" _a = [] if task == "s2t": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2T _a = IGNORE_KEYS_S2T elif task == "t2s": _a = None _a = MAPPING_T2S _a = IGNORE_KEYS_T2S elif task == "s2s": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2S _a = IGNORE_KEYS_S2S else: raise ValueError(f'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(_lowerCAmelCase, _lowerCAmelCase ): logger.info(f'{name} was ignored' ) continue _a = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) _a = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: _a = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _a = True if "*" in mapped_key: _a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2] _a = mapped_key.replace('''*''', _lowerCAmelCase ) if "weight_g" in name: _a = '''weight_g''' elif "weight_v" in name: _a = '''weight_v''' elif "bias" in name: _a = '''bias''' elif "weight" in name: _a = '''weight''' elif "running_mean" in name: _a = '''running_mean''' elif "running_var" in name: _a = '''running_var''' elif "num_batches_tracked" in name: _a = '''num_batches_tracked''' else: _a = None set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ): """simple docstring""" _a = full_name.split('''conv_layers.''' )[-1] _a = name.split('''.''' ) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ): """simple docstring""" if config_path is not None: _a = SpeechTaConfig.from_pretrained(_lowerCAmelCase ) else: _a = SpeechTaConfig() if task == "s2t": _a = config.max_text_positions _a = SpeechTaForSpeechToText(_lowerCAmelCase ) elif task == "t2s": _a = 18_76 _a = 6_00 _a = config.max_speech_positions _a = SpeechTaForTextToSpeech(_lowerCAmelCase ) elif task == "s2s": _a = 18_76 _a = config.max_speech_positions _a = SpeechTaForSpeechToSpeech(_lowerCAmelCase ) else: raise ValueError(f'Unknown task name: {task}' ) if vocab_path: _a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase ) _a = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _a = SpeechTaFeatureExtractor() _a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) _a = torch.load(_lowerCAmelCase ) recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(_lowerCAmelCase ) model.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. 
Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
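# Invocation sketch for the conversion entry point above, using the same
# positional argument order as the script's own `__main__` block. All paths
# below are placeholders, not real files.
convert_speechta_checkpoint(
    "t2s",                     # task: text-to-speech variant
    "./speecht5_tts.pt",       # fairseq checkpoint (placeholder path)
    "./speecht5_tts_hf",       # output folder for the HF model (placeholder)
    None,                      # config_path: fall back to the default SpeechTaConfig
    "./spm_char.model",        # SentencePiece vocab (placeholder path)
    None,                      # repo id for push_to_hub; None skips uploading
)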
"""simple docstring""" import logging import os import threading import time try: import warnings except ImportError: __snake_case = None try: import msvcrt except ImportError: __snake_case = None try: import fcntl except ImportError: __snake_case = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: __snake_case = OSError # Data # ------------------------------------------------ __snake_case = [ '''Timeout''', '''BaseFileLock''', '''WindowsFileLock''', '''UnixFileLock''', '''SoftFileLock''', '''FileLock''', ] __snake_case = '''3.0.12''' __snake_case = None def A_ ( ): """simple docstring""" global _logger _a = _logger or logging.getLogger(__name__ ) return _logger class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> int: _a = lock_file return None def __str__( self ) -> int: _a = F'The file lock \'{self.lock_file}\' could not be acquired.' return temp class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> List[Any]: _a = lock return None def __enter__( self ) -> Optional[int]: return self.lock def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: self.lock.release() return None class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ) -> Any: _a = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long _a = self.hash_filename_if_too_long(__UpperCAmelCase , __UpperCAmelCase ) # The path to the lock file. _a = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. _a = None # The default timeout value. _a = timeout # We use this lock primarily for the lock counter. _a = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. _a = 0 return None @property def _UpperCAmelCase ( self ) -> str: return self._lock_file @property def _UpperCAmelCase ( self ) -> List[Any]: return self._timeout @timeout.setter def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]: _a = float(__UpperCAmelCase ) return None def _UpperCAmelCase ( self ) -> Dict: raise NotImplementedError() def _UpperCAmelCase ( self ) -> Optional[Any]: raise NotImplementedError() @property def _UpperCAmelCase ( self ) -> Optional[Any]: return self._lock_file_fd is not None def _UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=0.05 ) -> str: # Use the default timeout, if no timeout is provided. if timeout is None: _a = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 _a = id(self ) _a = self._lock_file _a = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'Attempting to acquire lock {lock_id} on {lock_filename}' ) self._acquire() if self.is_locked: logger().debug(F'Lock {lock_id} acquired on {lock_filename}' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'Timeout on acquiring lock {lock_id} on {lock_filename}' ) raise Timeout(self._lock_file ) else: logger().debug( F'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' 
) time.sleep(__UpperCAmelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: _a = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def _UpperCAmelCase ( self , __UpperCAmelCase=False ) -> Dict: with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: _a = id(self ) _a = self._lock_file logger().debug(F'Attempting to release lock {lock_id} on {lock_filename}' ) self._release() _a = 0 logger().debug(F'Lock {lock_id} released on {lock_filename}' ) return None def __enter__( self ) -> Optional[int]: self.acquire() return self def __exit__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: self.release() return None def __del__( self ) -> List[str]: self.release(force=__UpperCAmelCase ) return None def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = os.path.basename(__UpperCAmelCase ) if len(__UpperCAmelCase ) > max_length and max_length > 0: _a = os.path.dirname(__UpperCAmelCase ) _a = str(hash(__UpperCAmelCase ) ) _a = filename[: max_length - len(__UpperCAmelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__UpperCAmelCase , __UpperCAmelCase ) else: return path class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ) -> str: from .file_utils import relative_to_absolute_path super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) _a = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: _a = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: try: msvcrt.locking(__UpperCAmelCase , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__UpperCAmelCase ) else: _a = fd return None def _UpperCAmelCase ( self ) -> str: _a = self._lock_file_fd _a = None msvcrt.locking(__UpperCAmelCase , msvcrt.LK_UNLCK , 1 ) os.close(__UpperCAmelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=-1 , __UpperCAmelCase=None ) -> Dict: _a = os.statvfs(os.path.dirname(__UpperCAmelCase ) ).f_namemax super().__init__(__UpperCAmelCase , timeout=__UpperCAmelCase , max_filename_length=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: _a = os.O_RDWR | os.O_CREAT | os.O_TRUNC _a = os.open(self._lock_file , __UpperCAmelCase ) try: fcntl.flock(__UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__UpperCAmelCase ) else: _a = fd return None def _UpperCAmelCase ( self ) -> List[Any]: # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition _a = self._lock_file_fd _a = None fcntl.flock(__UpperCAmelCase , fcntl.LOCK_UN ) os.close(__UpperCAmelCase ) return None class __lowerCamelCase ( a__ ): '''simple docstring''' def _UpperCAmelCase ( self ) -> int: _a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: _a = os.open(self._lock_file , __UpperCAmelCase ) except OSError: pass else: _a = fd return None def _UpperCAmelCase ( self ) -> Optional[int]: os.close(self._lock_file_fd ) _a = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None __snake_case = None if msvcrt: __snake_case = WindowsFileLock elif fcntl: __snake_case = UnixFileLock else: __snake_case = SoftFileLock if warnings is not None: warnings.warn('''only soft file lock is available''')
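# Typical usage of the locks above. In the original `filelock` module the
# platform-specific class chosen at the bottom of the file (msvcrt-based on
# Windows, fcntl-based on Unix, the soft lock otherwise) is exported as
# `FileLock`; this sketch assumes that alias. The file names are illustrative.
lock = FileLock("high_ground.txt.lock", timeout=10)

with lock:
    # Only one process at a time runs this block; others wait for the lock,
    # or raise `Timeout` after 10 seconds.
    with open("high_ground.txt", "a") as f:
        f.write("You were the chosen one.")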
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'decision_transformer' A_ : Union[str, Any] = ['past_key_values'] A_ : str = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]: _a = state_dim _a = act_dim _a = hidden_size _a = max_ep_len _a = action_tanh _a = vocab_size _a = n_positions _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = scale_attn_by_inverse_layer_idx _a = reorder_and_upcast_attn _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path __snake_case = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) __snake_case = [ord(letter) for letter in string.ascii_lowercase] __snake_case = {ord(char) for char in VALID_CHARS} __snake_case = ["the", "be", "to", "of", "and", "in", "that", "have"] def A_ ( _lowerCAmelCase : list[int], _lowerCAmelCase : tuple[int, ...] ): """simple docstring""" _a = "" _a = 42 _a = 42 _a = 42 for keychar, cipherchar in zip(cycle(_lowerCAmelCase ), _lowerCAmelCase ): _a = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(_lowerCAmelCase ) return decoded def A_ ( _lowerCAmelCase : list[int] ): """simple docstring""" _a = [] for key in product(_lowerCAmelCase, repeat=3 ): _a = try_key(_lowerCAmelCase, _lowerCAmelCase ) if encoded is not None: possibles.append(_lowerCAmelCase ) return possibles def A_ ( _lowerCAmelCase : list[str], _lowerCAmelCase : str ): """simple docstring""" return [possible for possible in possibles if common_word in possible.lower()] def A_ ( _lowerCAmelCase : str = "p059_cipher.txt" ): """simple docstring""" _a = 42 _a = 42 _a = 42 _a = 42 _a = Path(_lowerCAmelCase ).parent.joinpath(_lowerCAmelCase ).read_text(encoding='''utf-8''' ) _a = [int(_lowerCAmelCase ) for number in data.strip().split(''',''' )] _a = filter_valid_chars(_lowerCAmelCase ) for common_word in COMMON_WORDS: _a = filter_common_word(_lowerCAmelCase, _lowerCAmelCase ) if len(_lowerCAmelCase ) == 1: break _a = possibles[0] return sum(ord(_lowerCAmelCase ) for char in decoded_text ) if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''shortest_edge''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _a = image_std if image_std is not None else OPENAI_CLIP_STD _a = do_convert_rgb def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _a = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _a = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
_a = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: _a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
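# Usage sketch for the processor above. Its class name was mangled in this
# dump; in `transformers` this exact pipeline (resize shortest edge -> center
# crop -> rescale -> normalize with the OpenAI CLIP mean/std) is implemented
# by `CLIPImageProcessor`, which this example assumes.
import numpy as np
from transformers import CLIPImageProcessor

image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy HWC image
processor = CLIPImageProcessor()
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default 224x224 crop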
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
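# The property at the end of the config is the product of the feature-encoder
# strides: with the defaults above, 5 * 2**6 == 320, i.e. the encoder emits
# one frame per 320 raw audio samples (20 ms at 16 kHz).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320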
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __snake_case = logging.get_logger(__name__) __snake_case = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, '''constant''': get_constant_schedule, '''constant_w_warmup''': get_constant_schedule_with_warmup, } class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if config is None: assert isinstance(self.model , __UpperCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F' {self.model.__class__}' ) _a = self.model.config else: _a = config _a = data_args _a = self.config.tgt_vocab_size if isinstance(self.config , __UpperCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for' ''' padding..''' ) if self.args.label_smoothing == 0: _a = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _a = label_smoothed_nll_loss def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: if self.optimizer is None: _a = ['''bias''', '''LayerNorm.weight'''] _a = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] _a = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _a = Adafactor _a = {'''scale_parameter''': False, '''relative_step''': False} else: _a = AdamW _a = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } _a = self.args.learning_rate if self.sharded_ddp: _a = OSS( params=__UpperCAmelCase , optim=__UpperCAmelCase , **__UpperCAmelCase , ) else: _a = optimizer_cls(__UpperCAmelCase , **__UpperCAmelCase ) if self.lr_scheduler is None: _a = self._get_lr_scheduler(__UpperCAmelCase ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: _a = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _a = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _a = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _a = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCAmelCase ) return scheduler def _UpperCAmelCase ( self ) -> Optional[torch.utils.data.Sampler]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _a = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] _a = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _a , _a = model(**__UpperCAmelCase , labels=__UpperCAmelCase , use_cache=__UpperCAmelCase )[:2] else: # compute label smoothed loss _a = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] _a = torch.nn.functional.log_softmax(__UpperCAmelCase , dim=-1 ) _a , _a = self.loss_fn(__UpperCAmelCase , __UpperCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _a = inputs.pop('''labels''' ) _a , _a = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return loss def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None 
, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: _a = self._prepare_inputs(__UpperCAmelCase ) _a = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _a = self.model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **__UpperCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _a = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['''max_length'''] ) _a = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data _a , _a = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _a = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _a = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['''max_length'''] ) return (loss, logits, labels) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: # If PAD token is not defined at least EOS token has to be defined _a = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F' padded to `max_length`={max_length}' ) _a = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _a = tensor return padded_tensor
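# `_pad_tensors_to_max_len` above right-pads generated ids so short batches match
# `max_length`. A minimal sketch of that padding step (assumes torch is installed;
# pad_token_id=0 is an illustrative value, not taken from a real config):
import torch


def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # allocate a (batch, max_length) tensor filled with the pad id, then copy
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded


short = torch.tensor([[5, 6, 7], [8, 9, 10]])
print(pad_to_max_len(short, max_length=5, pad_token_id=0))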
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def A_ ( ): """simple docstring""" _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A_ ( ): """simple docstring""" _a = '''rougeLsum''' _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] assert score > score_no_sep def A_ ( ): """simple docstring""" _a = ['''rouge1''', '''rouge2''', '''rougeL'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) assert score_sep == score_no_sep def A_ ( ): """simple docstring""" _a = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _a = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _a = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A_ ( ): """simple docstring""" _a = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
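# The tests above hinge on `rougeLsum` treating "\n" as the sentence separator,
# which is what `newline_sep=True` inserts before scoring. A hedged sketch with the
# `rouge_score` package these utils wrap (pip install rouge-score):
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
target = "The cat sat on the mat.\nThe dog ran away."
prediction = "The cat sat on the mat.\nA dog ran away."
print(scorer.score(target, prediction)["rougeLsum"].fmeasure)  # sentence-level LCS F1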
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __snake_case = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''MaskFormerFeatureExtractor'''] __snake_case = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] __snake_case = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Any: _a , _a = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) _a = '''A painting of a squirrel eating a burger''' _a = jax.device_count() _a = num_samples * [prompt] _a = sd_pipe.prepare_inputs(__UpperCAmelCase ) _a = replicate(__UpperCAmelCase ) _a = shard(__UpperCAmelCase ) _a = jax.random.PRNGKey(0 ) _a = jax.random.split(__UpperCAmelCase , jax.device_count() ) _a = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=25 , jit=__UpperCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _a = images[0, 253:256, 253:256, -1] _a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _a = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> int: _a = '''stabilityai/stable-diffusion-2''' _a , _a = FlaxDPMSolverMultistepScheduler.from_pretrained(__UpperCAmelCase , subfolder='''scheduler''' ) _a , _a = FlaxStableDiffusionPipeline.from_pretrained( __UpperCAmelCase , scheduler=__UpperCAmelCase , revision='''bf16''' , dtype=jnp.bfloataa , ) _a = scheduler_params _a = '''A painting of a squirrel eating a burger''' _a = jax.device_count() _a = num_samples * [prompt] _a = sd_pipe.prepare_inputs(__UpperCAmelCase ) _a = replicate(__UpperCAmelCase ) _a = shard(__UpperCAmelCase ) _a = jax.random.PRNGKey(0 ) _a = jax.random.split(__UpperCAmelCase , jax.device_count() ) _a = sd_pipe(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_inference_steps=25 , jit=__UpperCAmelCase )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _a = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _a = images[0, 253:256, 253:256, -1] _a = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _a = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(F'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import numpy as np def A_ ( _lowerCAmelCase : list[float] ): """simple docstring""" return np.maximum(0, _lowerCAmelCase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
"""simple docstring""" def A_ ( ): """simple docstring""" _a = [] _a = 1 while len(_lowerCAmelCase ) < 1e6: constant.append(str(_lowerCAmelCase ) ) i += 1 _a = ''''''.join(_lowerCAmelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import math def A_ ( ): """simple docstring""" _a = input('''Enter message: ''' ) _a = int(input(f'Enter key [2-{len(_lowerCAmelCase ) - 1}]: ' ) ) _a = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): _a = encrypt_message(_lowerCAmelCase, _lowerCAmelCase ) elif mode.lower().startswith('''d''' ): _a = decrypt_message(_lowerCAmelCase, _lowerCAmelCase ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'Output:\n{text + "|"}' ) def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : str ): """simple docstring""" _a = [''''''] * key for col in range(_lowerCAmelCase ): _a = col while pointer < len(_lowerCAmelCase ): cipher_text[col] += message[pointer] pointer += key return "".join(_lowerCAmelCase ) def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : str ): """simple docstring""" _a = math.ceil(len(_lowerCAmelCase ) / key ) _a = key _a = (num_cols * num_rows) - len(_lowerCAmelCase ) _a = [''''''] * num_cols _a = 0 _a = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): _a = 0 row += 1 return "".join(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = 'bart' A_ : Optional[Any] = ['past_key_values'] A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: _a = vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = classifier_dropout _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): _a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a = {0: '''batch'''} _a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''decoder_sequence'''} _a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__UpperCAmelCase , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape _a = common_inputs['''decoder_input_ids'''].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__UpperCAmelCase , __UpperCAmelCase ) _a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers _a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs['''attention_mask'''].dtype _a = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": _a = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.task in ["default", "seq2seq-lm"]: _a = 
super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: _a = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
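# The dummy-input generators above allocate zero tensors shaped like cached
# attention states. A standalone sketch of the decoder past_key_values shape
# (batch=2, heads=16, past_len=8, d_model=1024 and 12 layers are illustrative):
import torch

batch, num_heads, past_len, d_model = 2, 16, 8, 1024
head_dim = d_model // num_heads
shape = (batch, num_heads, past_len, head_dim)
# one (key, value) pair of zeros per decoder layer, as in the generators above
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(12)]
print(past_key_values[0][0].shape)  # torch.Size([2, 16, 8, 64])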
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def A_ ( ): """simple docstring""" _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A_ ( ): """simple docstring""" _a = '''rougeLsum''' _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] assert score > score_no_sep def A_ ( ): """simple docstring""" _a = ['''rouge1''', '''rouge2''', '''rougeL'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) assert score_sep == score_no_sep def A_ ( ): """simple docstring""" _a = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _a = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _a = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A_ ( ): """simple docstring""" _a = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
320
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def A_ ( _lowerCAmelCase : str ): """simple docstring""" for char in word: _a = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = set() for token in tokens: _a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _a = list(_lowerCAmelCase ) return word_list def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens _a = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _a = bert_tokens _a , _a = 0, len(_lowerCAmelCase ) while start < end: _a = True if is_chinese(bert_word[start] ): _a = min(end - start, _lowerCAmelCase ) for i in range(_lowerCAmelCase, 1, -1 ): _a = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _a = '''##''' + bert_word[j] _a = start + i _a = False break if single_word: start += 1 return bert_word def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ): """simple docstring""" _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws _a = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ): _a = [] for id in input_ids: _a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase ) _a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _a = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def A_ ( _lowerCAmelCase : Any ): """simple docstring""" with open(args.file_name, '''r''', encoding='''utf-8''' ) as f: _a = f.readlines() _a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _a = LTP(args.ltp ) # faster in GPU device _a = BertTokenizer.from_pretrained(args.bert ) _a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) with open(args.save_path, '''w''', encoding='''utf-8''' ) as f: _a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __snake_case = parser.parse_args() main(args)
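# Sketch of the whole-word-marking step above: given LTP word segmentation, BERT
# subword tokens that continue a Chinese word get a "##" prefix so whole-word
# masking can group them. A simplified, hedged re-implementation (toy tokens below
# are illustrative, not real LTP output):
def mark_whole_words(bert_tokens: list, chinese_words: set) -> list:
    max_len = max((len(w) for w in chinese_words), default=0)
    tokens = list(bert_tokens)
    start = 0
    while start < len(tokens):
        matched = False
        # try the longest candidate word first, as in add_sub_symbol above
        for i in range(min(len(tokens) - start, max_len), 1, -1):
            if "".join(tokens[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens


print(mark_whole_words(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']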
"""simple docstring""" def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[list[int]] ): """simple docstring""" def update_area_of_max_square(_lowerCAmelCase : int, _lowerCAmelCase : int ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 _a = update_area_of_max_square(_lowerCAmelCase, col + 1 ) _a = update_area_of_max_square(row + 1, col + 1 ) _a = update_area_of_max_square(row + 1, _lowerCAmelCase ) if mat[row][col]: _a = 1 + min([right, diagonal, down] ) _a = max(largest_square_area[0], _lowerCAmelCase ) return sub_problem_sol else: return 0 _a = [0] update_area_of_max_square(0, 0 ) return largest_square_area[0] def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[list[int]] ): """simple docstring""" def update_area_of_max_square_using_dp_array( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[list[int]] ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] _a = update_area_of_max_square_using_dp_array(_lowerCAmelCase, col + 1, _lowerCAmelCase ) _a = update_area_of_max_square_using_dp_array(row + 1, col + 1, _lowerCAmelCase ) _a = update_area_of_max_square_using_dp_array(row + 1, _lowerCAmelCase, _lowerCAmelCase ) if mat[row][col]: _a = 1 + min([right, diagonal, down] ) _a = max(largest_square_area[0], _lowerCAmelCase ) _a = sub_problem_sol return sub_problem_sol else: return 0 _a = [0] _a = [[-1] * cols for _ in range(_lowerCAmelCase )] update_area_of_max_square_using_dp_array(0, 0, _lowerCAmelCase ) return largest_square_area[0] def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[list[int]] ): """simple docstring""" _a = [[0] * (cols + 1) for _ in range(rows + 1 )] _a = 0 for row in range(rows - 1, -1, -1 ): for col in range(cols - 1, -1, -1 ): _a = dp_array[row][col + 1] _a = dp_array[row + 1][col + 1] _a = dp_array[row + 1][col] if mat[row][col] == 1: _a = 1 + min(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = max(dp_array[row][col], _lowerCAmelCase ) else: _a = 0 return largest_square_area def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int, _lowerCAmelCase : list[list[int]] ): """simple docstring""" _a = [0] * (cols + 1) _a = [0] * (cols + 1) _a = 0 for row in range(rows - 1, -1, -1 ): for col in range(cols - 1, -1, -1 ): _a = current_row[col + 1] _a = next_row[col + 1] _a = next_row[col] if mat[row][col] == 1: _a = 1 + min(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = max(current_row[col], _lowerCAmelCase ) else: _a = 0 _a = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = ['image_processor', 'tokenizer'] A_ : Optional[int] = 'OwlViTImageProcessor' A_ : Any = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: _a = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __UpperCAmelCase , ) _a = kwargs.pop('''feature_extractor''' ) _a = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ) -> Tuple: if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''' ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): _a = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): _a = [] # Maximum number of queries across batch _a = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: _a = t + [''' '''] * (max_num_queries - len(__UpperCAmelCase )) _a = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' ) if return_tensors == "np": _a = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _a = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch _a = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 ) _a = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _a = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 ) _a = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 ) else: raise ValueError('''Target return tensor type could not be returned''' ) _a = BatchEncoding() _a = input_ids _a = attention_mask if query_images is not None: _a = BatchEncoding() _a = self.image_processor( 
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values _a = query_pixel_values if images is not None: _a = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _a = image_features.pixel_values return encoding elif query_images is not None and images is not None: _a = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _UpperCAmelCase ( self ) -> str: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __UpperCAmelCase , ) return self.image_processor_class @property def _UpperCAmelCase ( self ) -> Optional[int]: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __UpperCAmelCase , ) return self.image_processor
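# The processor above pads every sample's text queries to the batch-wide maximum
# before tokenising, so nested query lists become rectangular. A standalone sketch
# of that step (toy queries; " " is the pad string used above):
def pad_text_queries(text: list) -> list:
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]


batch = [["a photo of a cat"], ["a photo of a dog", "a photo of a bird"]]
for sample in pad_text_queries(batch):
    print(sample)  # both samples now hold two queries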
"""simple docstring""" import os import sys import unittest __snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = {'''BertModelTest''': '''BertModelTester'''} _a = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
"""simple docstring""" def A_ ( _lowerCAmelCase : int ): """simple docstring""" _a = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def A_ ( _lowerCAmelCase : int = 1_00 ): """simple docstring""" _a = 1 _a = 2 for i in range(2, max_n + 1 ): _a = pre_numerator _a = 2 * i // 3 if i % 3 == 0 else 1 _a = cur_numerator _a = e_cont * pre_numerator + temp return sum_digits(_lowerCAmelCase ) if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
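A hedged sketch of using the pipeline these tests exercise; the checkpoint and keyword arguments come from the test itself, and running facebook/sam-vit-huge needs a large GPU:

from transformers import pipeline

mask_generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = mask_generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=256,
)
# outputs["masks"] holds one binary mask per detected region,
# outputs["scores"] the matching predicted IoU scores.
for mask, score in zip(outputs["masks"][:3], outputs["scores"][:3]):
    print(mask.shape, float(score))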
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 255 , __UpperCAmelCase=True , ) -> Optional[int]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _a = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} _a = parent _a = batch_size _a = num_channels _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_normalize _a = image_mean _a = image_std _a = do_rescale _a = rescale_factor _a = do_pad def _UpperCAmelCase ( self ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ) -> List[str]: if not batched: _a = image_inputs[0] if isinstance(__UpperCAmelCase , Image.Image ): _a , _a = image.size else: _a , _a = image.shape[1], image.shape[2] if w < h: _a = int(self.size['''shortest_edge'''] * h / w ) _a = self.size['''shortest_edge'''] elif w > h: _a = self.size['''shortest_edge'''] _a = int(self.size['''shortest_edge'''] * w / h ) else: _a = self.size['''shortest_edge'''] _a = self.size['''shortest_edge'''] else: _a = [] for image in image_inputs: _a , _a = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0] _a = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : List[Any] = YolosImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> int: _a = YolosImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[Any]: _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) ) def _UpperCAmelCase ( self ) -> List[Any]: _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) _a = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , 
max_size=84 , pad_and_return_pixel_mask=__UpperCAmelCase ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self ) -> int: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCAmelCase ( self ) -> Optional[int]: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values _a , _a = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _UpperCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processings _a = self.image_processing_class(**self.image_processor_dict ) _a = self.image_processing_class(do_resize=__UpperCAmelCase , 
do_normalize=__UpperCAmelCase , do_rescale=__UpperCAmelCase ) # create random PyTorch tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors _a = image_processing_a.pad(__UpperCAmelCase , return_tensors='''pt''' ) _a = image_processing_a(__UpperCAmelCase , return_tensors='''pt''' ) self.assertTrue( torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> str: # prepare image and target _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: _a = json.loads(f.read() ) _a = {'''image_id''': 39769, '''annotations''': target} # encode them _a = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' ) _a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values _a = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase ) _a = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) ) # verify area _a = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) ) # verify boxes _a = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase ) _a = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) ) # verify image_id _a = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) ) # verify is_crowd _a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) ) # verify class_labels _a = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) ) # verify orig_size _a = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) ) # verify size _a = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) ) @slow def _UpperCAmelCase ( self ) -> Dict: # prepare image, target and masks_path _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: _a = json.loads(f.read() ) _a = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} _a = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them _a = YolosImageProcessor(format='''coco_panoptic''' ) _a = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='''pt''' ) # verify pixel values _a = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , __UpperCAmelCase ) _a = torch.tensor([0.2796, 0.3138, 0.3481] ) 
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __UpperCAmelCase , atol=1e-4 ) ) # verify area _a = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __UpperCAmelCase ) ) # verify boxes _a = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __UpperCAmelCase ) _a = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __UpperCAmelCase , atol=1e-3 ) ) # verify image_id _a = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __UpperCAmelCase ) ) # verify is_crowd _a = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __UpperCAmelCase ) ) # verify class_labels _a = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __UpperCAmelCase ) ) # verify masks _a = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __UpperCAmelCase ) # verify orig_size _a = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __UpperCAmelCase ) ) # verify size _a = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __UpperCAmelCase ) )
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: _a = parent _a = batch_size _a = encoder_seq_length _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = d_ff _a = relative_attention_num_buckets _a = dropout_rate _a = initializer_factor _a = eos_token_id _a = pad_token_id _a = decoder_start_token_id _a = None _a = decoder_layers def _UpperCAmelCase ( self ) -> Dict: return TaConfig.from_pretrained('''google/umt5-base''' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: _a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: _a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _UpperCAmelCase ( self ) -> Tuple: _a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = config.num_attention_heads _a = self.prepare_inputs_dict(__UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def _UpperCAmelCase ( self ) -> int: _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCAmelCase ( self ) -> Tuple: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self ) -> List[str]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) _a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) _a = result.last_hidden_state _a = result.past_key_values _a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(__UpperCAmelCase )['''last_hidden_state'''] _a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -1, random_slice_idx].detach() _a 
= output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() _a = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ : str = True A_ : List[str] = False A_ : List[Any] = False A_ : str = True A_ : List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ : Optional[Any] = [0.8, 0.9] def _UpperCAmelCase ( self ) -> Tuple: _a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() _a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _a = self.model_tester.prepare_config_and_inputs() _a = config_and_inputs[0] _a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) _a = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): _a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) _a = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we 
keep hitting edge cases.''' ) def _UpperCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) _a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) _a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off _a = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) _a = model.generate(input_ids.to(__UpperCAmelCase ) ) _a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _a = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __snake_case = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
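Both classes keep adjacency lists in self.graph as {node: [[weight, neighbour], ...]}. Only add_pair, dfs and bfs are referenced by their real names inside the snippet, so the class name below is a guess at the deobfuscated original:

g = Graph()           # hypothetical name for the first (directed) class above
g.add_pair(0, 1)
g.add_pair(1, 2)
g.add_pair(2, 0)      # closes the cycle 0 -> 1 -> 2 -> 0
print(g.dfs(0, 2))    # [0, 1, 2]: DFS trace from source 0 to destination 2
print(g.bfs(0))       # [0, 1, 2]: BFS visit order starting at 0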
"""simple docstring""" def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : int ): """simple docstring""" _a = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): _a = n - k # Calculate C(n,k) for i in range(_lowerCAmelCase ): result *= n - i result //= i + 1 return result def A_ ( _lowerCAmelCase : int ): """simple docstring""" return binomial_coefficient(2 * node_count, _lowerCAmelCase ) // (node_count + 1) def A_ ( _lowerCAmelCase : int ): """simple docstring""" if n < 0: raise ValueError('''factorial() not defined for negative values''' ) _a = 1 for i in range(1, n + 1 ): result *= i return result def A_ ( _lowerCAmelCase : int ): """simple docstring""" return catalan_number(_lowerCAmelCase ) * factorial(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = int(input('''Enter the number of nodes: ''').strip() or 0) if node_count <= 0: raise ValueError('''We need some nodes to work with.''') print( f'Given {node_count} nodes, there are {binary_tree_count(node_count)} ' f'binary trees and {catalan_number(node_count)} binary search trees.' )
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
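The final property multiplies the convolutional strides together; with the default (5, 2, 2, 2, 2, 2, 2) that is 5 * 2**6 = 320 input waveform samples per output frame. A sketch assuming the mangled class above is transformers' UniSpeechConfig (the property name is assumed from the matching upstream config):

from functools import reduce
from operator import mul

from transformers import UniSpeechConfig

config = UniSpeechConfig()
assert reduce(mul, config.conv_stride, 1) == 320
print(config.inputs_to_logits_ratio)  # 320 with the default strides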
"""simple docstring""" import math def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float ): """simple docstring""" if ( not isinstance(_lowerCAmelCase, (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * power_factor def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float ): """simple docstring""" if ( not isinstance(_lowerCAmelCase, (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } __snake_case = { '''google/rembert''': 256, } __snake_case = '''▁''' class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : List[Any] = RemBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = False if not self.vocab_file else True def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) ) return _a = os.path.join( 
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
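The two sequence builders above implement the BERT-style layout: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, with token-type ids 0 and 1 per segment. A sketch assuming the standard RemBertTokenizerFast class and Hub checkpoint name:

from transformers import RemBertTokenizerFast

tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
types = tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21])
print(ids)    # [cls_id, 10, 11, sep_id, 20, 21, sep_id]
print(types)  # [0, 0, 0, 0, 1, 1, 1]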
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" _a = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _a = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: _a = 4 _a = 48 _a = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _a = [6, 6, 6, 6] _a = 60 _a = [6, 6, 6, 6] _a = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _a = 4 _a = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: _a = 1 _a = 1 _a = 1_26 _a = 7 _a = 2_5_5.0 _a = '''''' return config def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" if "patch_embed.proj" in name and "layers" not in name: _a = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _a = name.replace('''patch_embed.norm''', '''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: _a = name.replace('''layers''', '''encoder.stages''' ) if "residual_group.blocks" in name: _a = name.replace('''residual_group.blocks''', '''layers''' ) if "attn.proj" in name: _a = name.replace('''attn.proj''', '''attention.output.dense''' ) if "attn" in name: _a = name.replace('''attn''', '''attention.self''' ) if "norm1" in name: _a = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: _a = name.replace('''norm2''', '''layernorm_after''' ) if "mlp.fc1" in name: _a = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: _a = name.replace('''mlp.fc2''', '''output.dense''' ) if "q_bias" in name: _a = name.replace('''q_bias''', '''query.bias''' ) if "k_bias" in name: _a = name.replace('''k_bias''', '''key.bias''' ) if "v_bias" in name: _a = name.replace('''v_bias''', '''value.bias''' ) if "cpb_mlp" in name: _a = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: _a = name.replace('''patch_embed.proj''', '''patch_embed.projection''' ) if name == "norm.weight": _a = '''layernorm.weight''' if name == "norm.bias": _a = '''layernorm.bias''' if "conv_first" in name: _a = name.replace('''conv_first''', '''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: _a = name.replace('''conv_last''', '''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: _a = name.replace('''conv_before_upsample.0''', '''conv_before_upsample''' ) if "upsample.0" in name: _a = name.replace('''upsample.0''', '''upsample.convolution_0''' ) if "upsample.2" in name: _a = name.replace('''upsample.2''', '''upsample.convolution_1''' ) _a = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": _a = name.replace('''upsample.0.weight''', '''upsample.conv.weight''' ) _a = name.replace('''upsample.0.bias''', '''upsample.conv.bias''' ) else: pass else: _a = '''swin2sr.''' + name return name def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : List[Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): _a = orig_state_dict.pop(_lowerCAmelCase ) if "qkv" in key: _a = key.split('''.''' 
) _a = int(key_split[1] ) _a = int(key_split[4] ) _a = config.embed_dim if "weight" in key: _a = val[:dim, :] _a = val[dim : dim * 2, :] _a = val[-dim:, :] else: _a = val[:dim] _a = val[dim : dim * 2] _a = val[-dim:] pass else: _a = val return orig_state_dict def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any] ): """simple docstring""" _a = get_config(_lowerCAmelCase ) _a = SwinaSRForImageSuperResolution(_lowerCAmelCase ) model.eval() _a = torch.hub.load_state_dict_from_url(_lowerCAmelCase, map_location='''cpu''' ) _a = convert_state_dict(_lowerCAmelCase, _lowerCAmelCase ) _a , _a = model.load_state_dict(_lowerCAmelCase, strict=_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(_lowerCAmelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f'Unexpected key {key} in state_dict' ) # verify values _a = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' _a = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw ).convert('''RGB''' ) _a = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values _a = 1_26 if '''Jpeg''' in checkpoint_url else 2_56 _a = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6], std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _a = transforms(_lowerCAmelCase ).unsqueeze(0 ) if config.num_channels == 1: _a = pixel_values[:, 0, :, :].unsqueeze(1 ) _a = model(_lowerCAmelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: _a = torch.Size([1, 3, 5_12, 5_12] ) _a = torch.tensor( [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: _a = torch.Size([1, 3, 10_24, 10_24] ) _a = torch.tensor( [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here _a = torch.Size([1, 3, 10_24, 10_24] ) _a = torch.tensor( [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: _a = torch.Size([1, 3, 5_12, 5_12] ) _a = torch.tensor( [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: _a = torch.Size([1, 3, 10_24, 10_24] ) _a = torch.tensor( [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] ) assert ( outputs.reconstruction.shape == expected_shape ), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}' assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], _lowerCAmelCase, atol=1e-3 ) print('''Looks ok!''' ) _a = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48''' ), 
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } _a = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_lowerCAmelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: model.push_to_hub(f'caidas/{model_name}' ) processor.push_to_hub(f'caidas/{model_name}' ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') __snake_case = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
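# A minimal invocation sketch for the script above. The filename
# convert_swin2sr_original_to_pytorch.py is an assumption; the URL is one of
# the keys of url_to_name, and the dump folder is a made-up example path.
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64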
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=224 , __UpperCAmelCase=1000 , __UpperCAmelCase=[3, 3, 6, 4] , __UpperCAmelCase=[48, 56, 112, 220] , ) -> str: _a = parent _a = batch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = num_labels _a = image_size _a = layer_depths _a = embed_dims def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.num_labels ) _a = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[str]: return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCAmelCase , layer_scale_init_value=1e-5 , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: _a = SwiftFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = self.num_labels _a = SwiftFormerForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) _a = SwiftFormerForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ) -> List[str]: ((_a) , (_a) , (_a)) = self.prepare_config_and_inputs() _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCamelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Dict = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () A_ : Optional[int] = ( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) A_ : 
Dict = False A_ : List[Any] = False A_ : Any = False A_ : Optional[int] = False A_ : Optional[int] = False def _UpperCAmelCase ( self ) -> List[str]: _a = SwiftFormerModelTester(self ) _a = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def _UpperCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass def _UpperCAmelCase ( self ) -> Union[str, Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def _UpperCAmelCase ( self ) -> List[str]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = SwiftFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def _UpperCAmelCase ( self ) -> List[str]: pass def _UpperCAmelCase ( self ) -> Any: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _a = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _a = outputs.hidden_states _a = 8 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] ) , ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: def _config_zero_init(__UpperCAmelCase ): _a = copy.deepcopy(__UpperCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__UpperCAmelCase , __UpperCAmelCase , 1e-1_0 ) if isinstance(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) 
, __UpperCAmelCase ): _a = _config_zero_init(getattr(__UpperCAmelCase , __UpperCAmelCase ) ) setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return configs_no_init _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: _a = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: pass def A_ ( ): """simple docstring""" _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _UpperCAmelCase ( self ) -> List[Any]: return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> Dict: _a = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(__UpperCAmelCase ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _a = model(**__UpperCAmelCase ) # verify the logits _a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _a = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCamelCase ( a__ ): '''simple docstring''' @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers 
import BertConfig, BertModel, BertTokenizer ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Tuple: _a = ''' from transformers import pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' _a = self.get_env() _a = '''1''' _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: _a = ''' from transformers import AutoModel ''' _a = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
"""simple docstring""" import argparse __snake_case = '''docs/source/_static/js/custom.js''' def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" with open(_lowerCAmelCase, encoding='''utf-8''', newline='''\n''' ) as f: _a = f.readlines() _a = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 _a = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(_lowerCAmelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __snake_case = parser.parse_args() update_custom_js(args.version)
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[int] = 'wav2vec2' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="sum" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=(512, 512, 512, 512, 1500) , __UpperCAmelCase=(5, 3, 3, 1, 1) , __UpperCAmelCase=(1, 2, 3, 1, 1) , __UpperCAmelCase=512 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[int]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # adapter _a = add_adapter _a = adapter_kernel_size _a = adapter_stride _a = num_adapter_layers _a = output_hidden_size or hidden_size _a = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _a = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = xvector_output_dim @property def _UpperCAmelCase ( self ) -> List[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
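# The closing property above multiplies the feature-extractor strides together,
# i.e. the total downsampling factor of the convolutional front end. With the
# default conv_stride of (5, 2, 2, 2, 2, 2, 2) from the signature above, that
# product is 320: one output frame per 320 input samples. A standalone sketch,
# no transformers import needed:
#
#   import functools
#   import operator
#
#   conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default conv_stride from the signature above
#   assert functools.reduce(operator.mul, conv_stride, 1) == 5 * 2**6 == 320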
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __snake_case = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __snake_case = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' __snake_case = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" def remove_articles(_lowerCAmelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE ) return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase : Tuple ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase : Tuple ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase : List[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ): """simple docstring""" _a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00 def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ): """simple docstring""" _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(_lowerCAmelCase ) _a = Counter(_lowerCAmelCase ) _a = Counter() for sgram, scount in sgramcounter.items(): _a = scount * numref _a = Counter(_lowerCAmelCase ) _a = Counter() for cgram, ccount in 
cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(_lowerCAmelCase ) > 0: _a = deltmpscorea / len(_lowerCAmelCase ) # ADDITION _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ): """simple docstring""" _a = len(_lowerCAmelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ): """simple docstring""" if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase ) else: _a 
= sentence if not return_str: _a = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ): """simple docstring""" if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] ) _a = sari_score / len(_lowerCAmelCase ) return 1_00 * sari_score def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ): """simple docstring""" _a = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] _a = sacrebleu.corpus_bleu( _lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = {} result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) return result
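# The docstring example can be reproduced with the module-level helpers
# directly, assuming the A_ definitions above are bound to the compute_sari /
# compute_em names used at the call sites in the metric class (they are in the
# upstream source):
#
#   sources = ['''About 95 species are currently accepted .''']
#   predictions = ['''About 95 you now get in .''']
#   references = [['''About 95 species are currently known .''']]
#   print(compute_sari(sources=sources, predictions=predictions, references=references))  # ~21.806 per the docstring
#   print(compute_em(predictions=predictions, references=references))  # 0.0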
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging __snake_case = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : str ): """simple docstring""" _a = nn.functional.normalize(_lowerCAmelCase ) _a = nn.functional.normalize(_lowerCAmelCase ) return torch.mm(_lowerCAmelCase, normalized_text_embeds.t() ) class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = CLIPConfig A_ : str = ['CLIPEncoderLayer'] def __init__( self , __UpperCAmelCase ) -> Optional[Any]: super().__init__(__UpperCAmelCase ) _a = CLIPVisionModel(config.vision_config ) _a = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__UpperCAmelCase ) _a = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__UpperCAmelCase ) _a = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__UpperCAmelCase ) _a = nn.Parameter(torch.ones(17 ) , requires_grad=__UpperCAmelCase ) _a = nn.Parameter(torch.ones(3 ) , requires_grad=__UpperCAmelCase ) @torch.no_grad() def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: _a = self.vision_model(__UpperCAmelCase )[1] # pooled_output _a = self.visual_projection(__UpperCAmelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _a = cosine_distance(__UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy() _a = cosine_distance(__UpperCAmelCase , self.concept_embeds ).cpu().float().numpy() _a = [] _a = image_embeds.shape[0] for i in range(__UpperCAmelCase ): _a = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _a = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): _a = special_cos_dist[i][concept_idx] _a = self.special_care_embeds_weights[concept_idx].item() _a = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) _a = 0.01 for concept_idx in range(len(cos_dist[0] ) ): _a = cos_dist[i][concept_idx] _a = self.concept_embeds_weights[concept_idx].item() _a = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(__UpperCAmelCase ) result.append(__UpperCAmelCase ) _a = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: _a = self.vision_model(__UpperCAmelCase )[1] # pooled_output _a = self.visual_projection(__UpperCAmelCase ) _a = cosine_distance(__UpperCAmelCase , self.special_care_embeds ) _a = cosine_distance(__UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _a = 0.0 _a = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) _a = torch.any(special_scores > 0 , dim=1 ) _a = special_care * 0.01 _a = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) _a = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = 
concept_scores.round(decimals=3) _a = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
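# cosine_distance above is a plain cosine-similarity matrix: L2-normalize both
# inputs, then take a matrix product. A quick numeric sketch, run inside this
# module so cosine_distance and torch are in scope:
#
#   a = torch.randn(2, 8)
#   b = torch.randn(3, 8)
#   sim = cosine_distance(a, b)  # shape (2, 3), each entry in [-1, 1]
#   assert sim.shape == (2, 3)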
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 50 ): """simple docstring""" _a = [1] * (length + 1) for row_length in range(3, length + 1 ): for block_length in range(3, row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , ) -> Dict: _a = parent _a = vocab_size _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _a = (image_size // patch_size) ** 2 _a = num_patches + 1 def _UpperCAmelCase ( self ) -> Any: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = FlaxBeitModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _a = FlaxBeitForMaskedImageModeling(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _a = self.type_sequence_label_size _a = FlaxBeitForImageClassification(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = FlaxBeitForImageClassification(__UpperCAmelCase ) _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = 
self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : str = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> None: _a = FlaxBeitModelTester(self ) _a = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _a = model_class(__UpperCAmelCase ) @jax.jit def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ): return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): _a = model_jitted(**__UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _a = model_jitted(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: for model_class_name in self.all_model_classes: _a = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' ) _a = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__UpperCAmelCase ) def A_ ( ): """simple docstring""" _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @require_flax class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _UpperCAmelCase ( self ) -> Tuple: return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> Optional[Any]: _a = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ).pixel_values # prepare bool_masked_pos _a = np.ones((1, 196) , dtype=__UpperCAmelCase ) # forward pass _a = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) _a = outputs.logits # verify the logits _a = (1, 196, 8192) 
self.assertEqual(logits.shape , __UpperCAmelCase ) _a = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1e-2 ) ) @slow def _UpperCAmelCase ( self ) -> int: _a = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) # forward pass _a = model(**__UpperCAmelCase ) _a = outputs.logits # verify the logits _a = (1, 1000) self.assertEqual(logits.shape , __UpperCAmelCase ) _a = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) _a = 281 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> str: _a = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) # forward pass _a = model(**__UpperCAmelCase ) _a = outputs.logits # verify the logits _a = (1, 21841) self.assertEqual(logits.shape , __UpperCAmelCase ) _a = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) _a = 2396 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
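# --- Added usage sketch (not part of the original test file) ---
# Minimal end-to-end inference with the Flax BEiT classifier exercised by the
# slow tests above; assumes network access to the Hub and that flax and Pillow
# are installed.
from PIL import Image
from transformers import BeitImageProcessor, FlaxBeitForImageClassification

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits       # shape (1, 1000)
print(logits.argmax(-1).item())       # 281, matching the integration test above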
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __snake_case = logging.get_logger('''transformers.models.speecht5''') __snake_case = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } __snake_case = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } __snake_case = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } __snake_case = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } __snake_case = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } __snake_case = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } __snake_case = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', 
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } __snake_case = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __snake_case = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = [] __snake_case = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ): """simple docstring""" for attribute in key.split('''.''' ): _a = getattr(_lowerCAmelCase, _lowerCAmelCase ) if weight_type is not None: _a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + 
"." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value else: _a = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ): """simple docstring""" _a = [] if task == "s2t": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2T _a = IGNORE_KEYS_S2T elif task == "t2s": _a = None _a = MAPPING_T2S _a = IGNORE_KEYS_T2S elif task == "s2s": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2S _a = IGNORE_KEYS_S2S else: raise ValueError(f'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(_lowerCAmelCase, _lowerCAmelCase ): logger.info(f'{name} was ignored' ) continue _a = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) _a = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: _a = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _a = True if "*" in mapped_key: _a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2] _a = mapped_key.replace('''*''', _lowerCAmelCase ) if "weight_g" in name: _a = '''weight_g''' elif "weight_v" in name: _a = '''weight_v''' elif "bias" in name: _a = '''bias''' elif "weight" in name: _a = '''weight''' elif "running_mean" in name: _a = '''running_mean''' elif "running_var" in name: _a = '''running_var''' elif "num_batches_tracked" in name: _a = '''num_batches_tracked''' else: _a = None set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ): """simple docstring""" _a = full_name.split('''conv_layers.''' )[-1] _a = name.split('''.''' ) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ): """simple docstring""" if config_path is not None: _a = SpeechTaConfig.from_pretrained(_lowerCAmelCase ) else: _a = SpeechTaConfig() if task == "s2t": _a = config.max_text_positions _a = SpeechTaForSpeechToText(_lowerCAmelCase ) elif task == "t2s": _a = 18_76 _a = 6_00 _a = config.max_speech_positions _a = SpeechTaForTextToSpeech(_lowerCAmelCase ) elif task == "s2s": _a = 18_76 _a = config.max_speech_positions _a = SpeechTaForSpeechToSpeech(_lowerCAmelCase ) else: raise ValueError(f'Unknown task name: {task}' ) if vocab_path: _a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase ) _a = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _a = SpeechTaFeatureExtractor() _a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) _a = torch.load(_lowerCAmelCase ) recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(_lowerCAmelCase ) model.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. 
Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
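# --- Added usage sketch (illustrative; all paths below are placeholders) ---
# The converter above is a CLI script; a typical invocation for the
# speech-to-text checkpoint would look like:
#
#   python convert_speecht5_checkpoint.py \
#       --task s2t \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --vocab_path /path/to/spm_char.model \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# --config_path and --push_to_hub are optional, per the argparse setup above.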
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue_model_parallelism.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'roberta-large', 'instance_type': 'ml.p3dn.24xlarge', 'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2}, }, ] ) class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Dict: if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=__UpperCAmelCase , ) assert hasattr(self , '''env''' ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: # configuration for running training on smdistributed Model Parallel _a = { '''enabled''': True, '''processes_per_host''': 8, } _a = { '''enabled''': True, '''parameters''': { '''microbatches''': 4, '''placement_strategy''': '''spread''', '''pipeline''': '''interleaved''', '''optimize''': '''speed''', '''partitions''': 4, '''ddp''': True, }, } _a = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options} _a = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer''' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'{self.env.base_job_name}-{instance_count}-smp-{name_extension}' , instance_count=__UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCAmelCase , hyperparameters={ **self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path, '''max_steps''': 500, } , metric_definitions=self.env.metric_definitions , distribution=__UpperCAmelCase , py_version='''py36''' , ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str: TrainingJobAnalytics(__UpperCAmelCase ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(1,)] ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> int: # create estimator _a = self.create_estimator(__UpperCAmelCase ) # run training estimator.fit() # result dataframe _a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) _a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _a = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in 
eval_loss ) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCAmelCase )
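# --- Added sketch (not part of the original test) ---
# Shape of the estimator the test builds; the role, image URI, and paths are
# placeholders and must come from your own SageMaker environment.
from sagemaker.huggingface import HuggingFace

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="/path/to/test_scripts",                # placeholder
    role="arn:aws:iam::123456789012:role/sagemaker",   # placeholder
    image_uri="<huggingface-training-image-uri>",      # placeholder
    instance_count=1,
    instance_type="ml.p3dn.24xlarge",
    distribution={
        "smdistributed": {"modelparallel": {"enabled": True, "parameters": {"partitions": 4}}},
        "mpi": {"enabled": True, "processes_per_host": 8},
    },
    hyperparameters={"model_name_or_path": "roberta-large", "max_steps": 500},
    py_version="py36",
)
# estimator.fit() then launches the training job, as in the test above.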
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'decision_transformer' A_ : Union[str, Any] = ['past_key_values'] A_ : str = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]: _a = state_dim _a = act_dim _a = hidden_size _a = max_ep_len _a = action_tanh _a = vocab_size _a = n_positions _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = scale_attn_by_inverse_layer_idx _a = reorder_and_upcast_attn _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : str ): """simple docstring""" assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def A_ ( _lowerCAmelCase : Optional[int], _lowerCAmelCase : Dict, _lowerCAmelCase : Dict ): """simple docstring""" _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _a = TextDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase, keep_in_memory=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase, _lowerCAmelCase ) @pytest.mark.parametrize( '''features''', [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ], ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : str ): """simple docstring""" _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} _a = features.copy() if features else default_expected_features _a = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _a = TextDatasetReader(_lowerCAmelCase, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase, _lowerCAmelCase ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : Dict, _lowerCAmelCase : List[str] ): """simple docstring""" _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} _a = TextDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase, split=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase, _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list] ) def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : int ): """simple docstring""" if issubclass(_lowerCAmelCase, _lowerCAmelCase ): _a = text_path elif issubclass(_lowerCAmelCase, _lowerCAmelCase ): _a = [text_path] _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} _a = TextDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase, _lowerCAmelCase ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : Optional[Any]=("train",) ): """simple docstring""" assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) for split in splits: _a = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True] ) def A_ ( _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _a = TextDatasetReader({'''train''': 
text_path}, cache_dir=_lowerCAmelCase, keep_in_memory=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase, _lowerCAmelCase ) @pytest.mark.parametrize( '''features''', [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ], ) def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : int, _lowerCAmelCase : Dict ): """simple docstring""" _a = tmp_path / '''cache''' # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" _a = {'''text''': '''string'''} _a = features.copy() if features else default_expected_features _a = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _a = TextDatasetReader({'''train''': text_path}, features=_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase, _lowerCAmelCase ) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Dict, _lowerCAmelCase : Tuple ): """simple docstring""" if split: _a = {split: text_path} else: _a = '''train''' _a = {'''train''': text_path, '''test''': text_path} _a = tmp_path / '''cache''' _a = {'''text''': '''string'''} _a = TextDatasetReader(_lowerCAmelCase, cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase, _lowerCAmelCase, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
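# --- Added sketch (not part of the original tests) ---
# The TextDatasetReader exercised above backs the public text loader; a
# minimal equivalent, assuming a local newline-delimited file at ./data.txt:
from datasets import load_dataset

ds = load_dataset("text", data_files={"train": "./data.txt"}, split="train")
print(ds.column_names)  # ["text"], matching the assertions above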
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''shortest_edge''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _a = image_std if image_std is not None else OPENAI_CLIP_STD _a = do_convert_rgb def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _a = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _a = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
_a = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: _a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
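# --- Added usage sketch (illustrative; the checkpoint name is an assumption) ---
# Running a CLIP-style preprocessing pipeline like the one defined above on a
# single PIL image.
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default crop_size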
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 60_08_51_47_51_43 ): """simple docstring""" try: _a = int(_lowerCAmelCase ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) _a = 2 _a = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 _a = i while n % i == 0: _a = n // i i += 1 return int(_lowerCAmelCase ) if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[int], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ): """simple docstring""" if index == r: for j in range(_lowerCAmelCase ): print(data[j], end=''' ''' ) print(''' ''' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location _a = arr[i] combination_util(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, index + 1, _lowerCAmelCase, i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Any, _lowerCAmelCase : Optional[Any] ): """simple docstring""" _a = [0] * r # Print all combination using temporary array 'data[]' combination_util(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, 0, _lowerCAmelCase, 0 ) if __name__ == "__main__": # Driver code to check the function above __snake_case = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def A_ ( ): """simple docstring""" _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A_ ( ): """simple docstring""" _a = '''rougeLsum''' _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] assert score > score_no_sep def A_ ( ): """simple docstring""" _a = ['''rouge1''', '''rouge2''', '''rougeL'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) assert score_sep == score_no_sep def A_ ( ): """simple docstring""" _a = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _a = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _a = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A_ ( ): """simple docstring""" _a = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
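# --- Added sketch (signature inferred from the tests above; calculate_rouge
# lives in the local examples utils, not in a published library) ---
from utils import calculate_rouge

preds = ["hello there general kenobi"]
refs = ["hello there general kenobi"]
scores = calculate_rouge(preds, refs, rouge_keys=["rouge2", "rougeL"])
print(scores)  # aggregated scores when bootstrap_aggregation is left enabled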
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case = { '''configuration_trajectory_transformer''': [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrajectoryTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrajectoryTransformerModel''', '''TrajectoryTransformerPreTrainedModel''', '''load_tf_weights_in_trajectory_transformer''', ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version __snake_case = logging.getLogger(__name__) require_version('''pytorch_lightning>=1.0.4''') __snake_case = { '''base''': AutoModel, '''sequence-classification''': AutoModelForSequenceClassification, '''question-answering''': AutoModelForQuestionAnswering, '''pretraining''': AutoModelForPreTraining, '''token-classification''': AutoModelForTokenClassification, '''language-modeling''': AutoModelWithLMHead, '''summarization''': AutoModelForSeqaSeqLM, '''translation''': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization __snake_case = { '''linear''': get_linear_schedule_with_warmup, '''cosine''': get_cosine_schedule_with_warmup, '''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup, '''polynomial''': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } __snake_case = sorted(arg_to_scheduler.keys()) __snake_case = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}''' class __lowerCamelCase ( pl.LightningModule ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) _a = 0 _a = Path(self.hparams.output_dir ) _a = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _a = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: _a = config _a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: _a = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: _a = tokenizer _a = MODEL_MODES[mode] if model is None: _a = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , ) else: _a = model def _UpperCAmelCase ( 
self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: _a = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Tuple: _a = arg_to_scheduler[self.hparams.lr_scheduler] _a = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.model _a = ['''bias''', '''LayerNorm.weight'''] _a = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _a = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: _a = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _a = optimizer _a = self.get_lr_scheduler() return [optimizer], [scheduler] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str: return self.validation_end(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: _a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if stage == "test": _a = len(self.test_dataloader().dataset ) else: _a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) _a = len(self.train_dataloader().dataset ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ) -> Union[str, Any]: raise NotImplementedError('''You must implement this for your task''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: return self.train_loader def _UpperCAmelCase ( self ) -> int: return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def _UpperCAmelCase ( self , __UpperCAmelCase ) -> None: _a = self.output_dir.joinpath('''best_tfmr''' ) _a = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict: parser.add_argument( '''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=__UpperCAmelCase , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( 
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(__UpperCAmelCase ).parent / '''test_run''' / '''cache''' ) , type=__UpperCAmelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=__UpperCAmelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=__UpperCAmelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=__UpperCAmelCase , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=__UpperCAmelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5e-5 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=__UpperCAmelCase , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=__UpperCAmelCase , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__UpperCAmelCase ) parser.add_argument('''--train_batch_size''' , default=32 , type=__UpperCAmelCase ) parser.add_argument('''--eval_batch_size''' , default=32 , type=__UpperCAmelCase ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class __lowerCamelCase ( pl.Callback ): '''simple docstring''' def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class __lowerCamelCase ( pl.Callback ): '''simple docstring''' def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCamelCase ( pl.Callback ): '''simple docstring''' def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _a = trainer.lr_schedulers[0]['''scheduler'''] _a = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: rank_zero_info('''***** Validation results *****''' ) _a = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str: rank_zero_info('''***** Test results *****''' ) _a = trainer.callback_metrics # Log and save results to file _a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(__UpperCAmelCase , '''w''' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) ) def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Tuple ): """simple docstring""" parser.add_argument( '''--output_dir''', default=str(Path(_lowerCAmelCase ).parent / '''test_run''' / '''model_checkpoints''' ), type=_lowerCAmelCase, help='''The output directory where the model predictions and checkpoints will be written.''', ) parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', ) parser.add_argument( '''--fp16_opt_level''', type=_lowerCAmelCase, default='''O2''', help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ), ) parser.add_argument('''--n_tpu_cores''', dest='''tpu_cores''', type=_lowerCAmelCase ) parser.add_argument('''--max_grad_norm''', dest='''gradient_clip_val''', default=1.0, type=_lowerCAmelCase, help='''Max gradient norm''' ) parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' ) parser.add_argument('''--do_predict''', action='''store_true''', help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''', dest='''accumulate_grad_batches''', type=_lowerCAmelCase, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', ) parser.add_argument('''--seed''', type=_lowerCAmelCase, default=42, help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''', default=str(Path(_lowerCAmelCase ).parent / '''test_run''' / '''dummy-train-data''' ), type=_lowerCAmelCase, help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''', ) def A_ ( _lowerCAmelCase : BaseTransformer, _lowerCAmelCase : argparse.Namespace, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[Any]=True, _lowerCAmelCase : Tuple=[], _lowerCAmelCase : str=None, _lowerCAmelCase : int=None, **_lowerCAmelCase : List[Any], ): """simple docstring""" pl.seed_everything(args.seed ) # init model _a = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowerCAmelCase ) # add custom checkpoints if checkpoint_callback is None: _a = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix='''checkpoint''', monitor='''val_loss''', mode='''min''', save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowerCAmelCase ) if logging_callback is None: _a = LoggingCallback() _a = {} if args.fpaa: _a = 16 if args.gpus > 1: _a = '''auto''' _a = '''ddp''' _a = args.accumulate_grad_batches _a = None _a = '''auto''' _a = pl.Trainer.from_argparse_args( _lowerCAmelCase, weights_summary=_lowerCAmelCase, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=_lowerCAmelCase, val_check_interval=1, num_sanity_val_steps=2, **_lowerCAmelCase, ) if args.do_train: trainer.fit(_lowerCAmelCase ) else: print('''RAG modeling tests with new set functions successfully executed!''' ) return trainer
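# --- Added sketch (not part of the original module) ---
# What the arg_to_scheduler lookup above resolves to at runtime; the dummy
# model and step counts are placeholders.
import torch
from transformers.optimization import get_linear_schedule_with_warmup

model = torch.nn.Linear(4, 2)  # stand-in for the wrapped transformer
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=1_000
)
# The module then steps this per batch, wrapped as
# {"scheduler": scheduler, "interval": "step", "frequency": 1}.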
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __snake_case = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case = { '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __snake_case = { '''num_train_timesteps''': 40, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __snake_case = { '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __snake_case = { '''num_train_timesteps''': 151, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" if isinstance(_lowerCAmelCase, _lowerCAmelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : Any=False ): """simple docstring""" _a = checkpoint[f'{old_prefix}.in_layers.0.weight'] _a = checkpoint[f'{old_prefix}.in_layers.0.bias'] _a = checkpoint[f'{old_prefix}.in_layers.2.weight'] _a = checkpoint[f'{old_prefix}.in_layers.2.bias'] _a = checkpoint[f'{old_prefix}.emb_layers.1.weight'] _a = checkpoint[f'{old_prefix}.emb_layers.1.bias'] _a = checkpoint[f'{old_prefix}.out_layers.0.weight'] _a = checkpoint[f'{old_prefix}.out_layers.0.bias'] _a = checkpoint[f'{old_prefix}.out_layers.3.weight'] _a = checkpoint[f'{old_prefix}.out_layers.3.bias'] if has_skip: _a = checkpoint[f'{old_prefix}.skip_connection.weight'] _a = checkpoint[f'{old_prefix}.skip_connection.bias'] return new_checkpoint def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Any, _lowerCAmelCase : List[str], _lowerCAmelCase : Dict, _lowerCAmelCase 
: Optional[int]=None ): """simple docstring""" _a , _a , _a = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3, dim=0 ) _a , _a , _a = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3, dim=0 ) _a = checkpoint[f'{old_prefix}.norm.weight'] _a = checkpoint[f'{old_prefix}.norm.bias'] _a = weight_q.squeeze(-1 ).squeeze(-1 ) _a = bias_q.squeeze(-1 ).squeeze(-1 ) _a = weight_k.squeeze(-1 ).squeeze(-1 ) _a = bias_k.squeeze(-1 ).squeeze(-1 ) _a = weight_v.squeeze(-1 ).squeeze(-1 ) _a = bias_v.squeeze(-1 ).squeeze(-1 ) _a = ( checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) _a = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : Any ): """simple docstring""" _a = torch.load(_lowerCAmelCase, map_location='''cpu''' ) _a = {} _a = checkpoint['''time_embed.0.weight'''] _a = checkpoint['''time_embed.0.bias'''] _a = checkpoint['''time_embed.2.weight'''] _a = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: _a = checkpoint['''label_emb.weight'''] _a = checkpoint['''input_blocks.0.0.weight'''] _a = checkpoint['''input_blocks.0.0.bias'''] _a = unet_config['''down_block_types'''] _a = unet_config['''layers_per_block'''] _a = unet_config['''attention_head_dim'''] _a = unet_config['''block_out_channels'''] _a = 1 _a = channels_list[0] for i, layer_type in enumerate(_lowerCAmelCase ): _a = channels_list[i] _a = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_lowerCAmelCase ): _a = f'down_blocks.{i}.resnets.{j}' _a = f'input_blocks.{current_layer}.0' _a = True if j == 0 and downsample_block_has_skip else False _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, has_skip=_lowerCAmelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_lowerCAmelCase ): _a = f'down_blocks.{i}.resnets.{j}' _a = f'input_blocks.{current_layer}.0' _a = True if j == 0 and downsample_block_has_skip else False _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, has_skip=_lowerCAmelCase ) _a = f'down_blocks.{i}.attentions.{j}' _a = f'input_blocks.{current_layer}.1' _a = convert_attention( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) current_layer += 1 if i != len(_lowerCAmelCase ) - 1: _a = f'down_blocks.{i}.downsamplers.0' _a = f'input_blocks.{current_layer}.0' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) current_layer += 1 _a = current_channels # hardcoded the mid-block for now _a = '''mid_block.resnets.0''' _a = '''middle_block.0''' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = '''mid_block.attentions.0''' _a = '''middle_block.1''' _a = convert_attention(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = '''mid_block.resnets.1''' _a = '''middle_block.2''' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = 0 _a = unet_config['''up_block_types'''] for i, layer_type in enumerate(_lowerCAmelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _a = f'up_blocks.{i}.resnets.{j}' _a = f'output_blocks.{current_layer}.0' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, has_skip=_lowerCAmelCase ) current_layer += 1 if i != len(_lowerCAmelCase ) - 1: _a = 
f'up_blocks.{i}.upsamplers.0' _a = f'output_blocks.{current_layer-1}.1' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _a = f'up_blocks.{i}.resnets.{j}' _a = f'output_blocks.{current_layer}.0' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, has_skip=_lowerCAmelCase ) _a = f'up_blocks.{i}.attentions.{j}' _a = f'output_blocks.{current_layer}.1' _a = convert_attention( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) current_layer += 1 if i != len(_lowerCAmelCase ) - 1: _a = f'up_blocks.{i}.upsamplers.0' _a = f'output_blocks.{current_layer-1}.2' _a = convert_resnet(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = checkpoint['''out.0.weight'''] _a = checkpoint['''out.0.bias'''] _a = checkpoint['''out.2.weight'''] _a = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') __snake_case = parser.parse_args() __snake_case = strabool(args.class_cond) __snake_case = os.path.basename(args.unet_path) print(f'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: __snake_case = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __snake_case = TEST_UNET_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: __snake_case = None __snake_case = con_pt_to_diffuser(args.unet_path, unet_config) __snake_case = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __snake_case = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __snake_case = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __snake_case = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') __snake_case = CMStochasticIterativeScheduler(**scheduler_config) __snake_case = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
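The script above remaps an OpenAI consistency-model checkpoint onto diffusers' UNet2DModel layout (rendered as UNetaDModel by the dump's anonymizer) and pairs it with a CMStochasticIterativeScheduler. A typical invocation and a one-step sampling smoke test might look like this sketch; the checkpoint and output paths are placeholders.

# Shell (illustrative paths):
#   python convert_consistency_model.py --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64_l2 --class_cond True

import torch
from diffusers import ConsistencyModelPipeline

pipe = ConsistencyModelPipeline.from_pretrained('./cd_imagenet64_l2', torch_dtype=torch.float16)
pipe.to('cuda')
# One-step generation is the point of consistency models.
image = pipe(batch_size=1, num_inference_steps=1).images[0]
image.save('sample.png')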
"""simple docstring""" def A_ ( ): """simple docstring""" _a = [] _a = 1 while len(_lowerCAmelCase ) < 1e6: constant.append(str(_lowerCAmelCase ) ) i += 1 _a = ''''''.join(_lowerCAmelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : Union[str, Any] = LEDTokenizer A_ : Dict = LEDTokenizerFast A_ : Optional[int] = True def _UpperCAmelCase ( self ) -> int: super().setUp() _a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) _a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a = {'''unk_token''': '''<unk>'''} _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCAmelCase ) ) def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> str: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return "lower newer", "lower newer" @cached_property def _UpperCAmelCase ( self ) -> List[str]: return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(__UpperCAmelCase , max_length=len(__UpperCAmelCase ) , padding=__UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _a = batch.input_ids.tolist()[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def _UpperCAmelCase ( self ) -> List[str]: _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' ) self.assertIn('''input_ids''' , __UpperCAmelCase ) self.assertIn('''attention_mask''' , __UpperCAmelCase ) self.assertNotIn('''labels''' , __UpperCAmelCase ) self.assertNotIn('''decoder_attention_mask''' , __UpperCAmelCase ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, 
self.default_tokenizer_fast]: _a = tokenizer(text_target=__UpperCAmelCase , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: _a = ['''A long paragraph for summarization.'''] _a = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' ) _a = tokenizer(text_target=__UpperCAmelCase , return_tensors='''pt''' ) _a = inputs['''input_ids'''] _a = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _a = ['''Summary of the text.''', '''Another summary.'''] _a = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase ) _a = [[0] * len(__UpperCAmelCase ) for x in encoded_output['''input_ids''']] _a = tokenizer.pad(__UpperCAmelCase ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): _a = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) _a = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) _a = '''A, <mask> AllenNLP sentence.''' _a = tokenizer_r.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) _a = tokenizer_p.encode_plus(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _a = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) _a = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( __UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __UpperCAmelCase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
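The tests above exercise LED's one tokenizer-level quirk: batches carry a global_attention_mask that tokenizer.pad keeps aligned with input_ids. A short usage sketch of the same behavior outside the test harness:

import torch
from transformers import LEDTokenizer

tok = LEDTokenizer.from_pretrained('allenai/led-base-16384')
batch = tok(['A long paragraph for summarization.'], return_tensors='pt')
global_attention_mask = torch.zeros_like(batch['input_ids'])
global_attention_mask[:, 0] = 1   # LED expects global attention on at least the first token
batch['global_attention_mask'] = global_attention_mask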
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = 'bart' A_ : Optional[Any] = ['past_key_values'] A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: _a = vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = classifier_dropout _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): _a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a = {0: '''batch'''} _a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''decoder_sequence'''} _a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__UpperCAmelCase , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape _a = common_inputs['''decoder_input_ids'''].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__UpperCAmelCase , __UpperCAmelCase ) _a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers _a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs['''attention_mask'''].dtype _a = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": _a = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.task in ["default", "seq2seq-lm"]: _a = 
super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: _a = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
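The second class above carries the name BartOnnxConfig in the real transformers source (the dump strips it). It plugs into the transformers.onnx export path; a hedged sketch of exporting the seq2seq task, with an illustrative checkpoint:

from pathlib import Path
from transformers import AutoTokenizer, BartForConditionalGeneration
from transformers.models.bart.configuration_bart import BartOnnxConfig
from transformers.onnx import export

model_id = 'facebook/bart-base'          # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = BartForConditionalGeneration.from_pretrained(model_id)

onnx_config = BartOnnxConfig(model.config, task='seq2seq-lm')
export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path('bart.onnx'))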
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def A_ ( _lowerCAmelCase : str ): """simple docstring""" for char in word: _a = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = set() for token in tokens: _a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _a = list(_lowerCAmelCase ) return word_list def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens _a = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _a = bert_tokens _a , _a = 0, len(_lowerCAmelCase ) while start < end: _a = True if is_chinese(bert_word[start] ): _a = min(end - start, _lowerCAmelCase ) for i in range(_lowerCAmelCase, 1, -1 ): _a = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _a = '''##''' + bert_word[j] _a = start + i _a = False break if single_word: start += 1 return bert_word def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ): """simple docstring""" _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws _a = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ): _a = [] for id in input_ids: _a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase ) _a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _a = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def A_ ( _lowerCAmelCase : Any ): """simple docstring""" with open(args.file_name, '''r''', encoding='''utf-8''' ) as f: _a = f.readlines() _a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _a = LTP(args.ltp ) # faster in GPU device _a = BertTokenizer.from_pretrained(args.bert ) _a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) with open(args.save_path, '''w''', encoding='''utf-8''' ) as f: _a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __snake_case = parser.parse_args() main(args)
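End to end, the script above segments each line with LTP, finds multi-character Chinese words, and records which BERT sub-tokens should be re-glued for whole-word masking. A hedged sketch of running it and consuming the refs follows; the paths are the script's own defaults, and attaching the refs to a dataset as a chinese_ref column follows the run_mlm_wwm example and is omitted here.

# Shell (default paths from the argparse block above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt

import json
from transformers import BertTokenizer, DataCollatorForWholeWordMask

tokenizer = BertTokenizer.from_pretrained('./resources/robert')
with open('./resources/ref.txt', encoding='utf-8') as f:
    refs = [json.loads(line) for line in f]   # one list of '##'-able positions per input line
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)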
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : list[float], _lowerCAmelCase : list[float] ): """simple docstring""" _a = sorted(numsa + numsa ) _a , _a = divmod(len(_lowerCAmelCase ), 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() __snake_case = [float(x) for x in input('''Enter the elements of first array: ''').split()] __snake_case = [float(x) for x in input('''Enter the elements of second array: ''').split()] print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_a)}')
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
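The OnnxConfig above mainly exists to produce correctly ordered dummy inputs (including past key/values when use_past is set). A hedged sketch of inspecting them; GPTJOnnxConfig is the name the anonymized class carries in the real source:

from transformers import AutoTokenizer, GPTJConfig, TensorType
from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig

config = GPTJConfig()                     # default hyperparameters; no weights are needed
onnx_config = GPTJOnnxConfig(config, task='default', use_past=False)
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B')
dummy = onnx_config.generate_dummy_inputs(
    tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
)
print(list(dummy))                        # ['input_ids', 'attention_mask']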
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" import os import sys import unittest __snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = {'''BertModelTest''': '''BertModelTester'''} _a = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
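The mappings asserted above are produced by plain functions over a test-file path, so they can also be called directly, e.g. from a repo checkout with utils/ on sys.path:

from get_test_info import get_test_to_tester_mapping, to_json

mapping = get_test_to_tester_mapping('tests/models/bert/test_modeling_bert.py')
print(to_json(mapping))   # {'BertModelTest': 'BertModelTester'}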
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" if isinstance(_lowerCAmelCase, collections.abc.Iterable ): return x return (x, x) @require_flax class __lowerCamelCase : '''simple docstring''' def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: pass def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = np.abs((a - b) ).max() self.assertLessEqual(__UpperCAmelCase , __UpperCAmelCase , F'Difference between torch and flax is {diff} (>= {tol}).' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[int]: _a = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _a = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _a , _a = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _a = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Any: _a , _a = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _a = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) _a = output[0] with 
tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) _a = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase ) _a = after_output[0] _a = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1e-3 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict: _a , _a = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase ) _a = {'''vision_model''': vision_model, '''text_model''': text_model} _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase ) _a = model( input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase ) _a = output.vision_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _a = to_atuple(vision_model.config.image_size ) _a = to_atuple(vision_model.config.patch_size ) _a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _a = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _a = output.text_model_output.attentions self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: pt_model.to(__UpperCAmelCase ) pt_model.eval() # prepare inputs _a = inputs_dict _a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _a = pt_model(**__UpperCAmelCase ).to_tuple() _a = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) _a = fx_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCAmelCase ) _a = VisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase ) pt_model_loaded.to(__UpperCAmelCase ) pt_model_loaded.eval() with torch.no_grad(): _a = pt_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(__UpperCAmelCase , pt_output_loaded.numpy() , 4e-2 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = 
VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _a = VisionTextDualEncoderModel(__UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase ) _a = fx_state self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _a = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase ) _a = VisionTextDualEncoderModel(__UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel(__UpperCAmelCase ) _a = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params ) self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: _a = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.prepare_config_and_inputs() self.check_save_load(**__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: _a = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__UpperCAmelCase ) @is_pt_flax_cross_test def _UpperCAmelCase ( self ) -> Any: _a = self.prepare_config_and_inputs() _a = config_inputs_dict.pop('''vision_config''' ) _a = config_inputs_dict.pop('''text_config''' ) _a = config_inputs_dict self.check_equivalence_pt_to_flax(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.check_equivalence_flax_to_pt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> List[Any]: _a , _a = self.get_pretrained_model_and_inputs() _a = model_a(**__UpperCAmelCase ) _a = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__UpperCAmelCase ) _a = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase ) _a = model_a(**__UpperCAmelCase ) _a = after_outputs[0] _a = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1e-5 ) @require_flax class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Optional[int]: _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , ) _a = 13 _a = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _a = random_attention_mask([batch_size, 4] ) _a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: _a = FlaxViTModel(__UpperCAmelCase ) _a = FlaxBertModel(__UpperCAmelCase ) return vision_model, text_model def _UpperCAmelCase ( self ) -> str: _a = FlaxViTModelTester(self ) _a = FlaxBertModelTester(self ) _a = vit_model_tester.prepare_config_and_inputs() _a = bert_model_tester.prepare_config_and_inputs() _a , _a = vision_config_and_inputs _a , _a , _a , _a = text_config_and_inputs # make sure that cross attention layers are added return 
{ "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Dict: _a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( '''hf-internal-testing/tiny-random-clip''' , '''hf-internal-testing/tiny-bert''' , vision_from_pt=__UpperCAmelCase , text_from_pt=__UpperCAmelCase , ) _a = 13 _a = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _a = random_attention_mask([batch_size, 4] ) _a = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask} return model, inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: _a = FlaxCLIPVisionModel(__UpperCAmelCase ) _a = FlaxBertModel(__UpperCAmelCase ) return vision_model, text_model def _UpperCAmelCase ( self ) -> str: _a = FlaxCLIPVisionModelTester(self ) _a = FlaxBertModelTester(self ) _a = clip_model_tester.prepare_config_and_inputs() _a = bert_model_tester.prepare_config_and_inputs() _a , _a = vision_config_and_inputs _a , _a , _a , _a = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow def _UpperCAmelCase ( self ) -> Any: _a = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' , logit_scale_init_value=1.0 ) _a = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' ) _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) _a = processor( text=['''una foto di un gatto''', '''una foto di un cane'''] , images=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''np''' ) _a = model(**__UpperCAmelCase ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _a = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCAmelCase , atol=1e-3 ) )
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
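# The image-fingerprint helper above calls `hashlib.mda`, which does not exist
# in the standard library; `hashlib.md5` is almost certainly what was intended.
# A working sketch of the two test helpers (Pillow and NumPy are already
# imported by this file) might look like:
import hashlib

import numpy as np
from PIL import Image

def hashimage(image: Image.Image) -> str:
    # 10-character fingerprint of the raw pixel buffer, used to shorten the
    # expected mask outputs in the slow pipeline tests above.
    return hashlib.md5(image.tobytes()).hexdigest()[:10]

def mask_to_test_readable(mask: Image.Image) -> dict:
    shape = np.array(mask).shape
    return {"hash": hashimage(mask), "shape": shape}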
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = [ '''small''', '''small-base''', '''medium''', '''medium-base''', '''intermediate''', '''intermediate-base''', '''large''', '''large-base''', '''xlarge''', '''xlarge-base''', ] __snake_case = { '''vocab_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''', '''funnel-transformer/small-base''': ( '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''', '''funnel-transformer/large-base''': ( '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json''' ), }, } __snake_case = {f'funnel-transformer/{name}': 512 for name in _model_names} __snake_case = {f'funnel-transformer/{name}': {'''do_lower_case''': True} for name in _model_names} class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = VOCAB_FILES_NAMES A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP A_ : Any = PRETRAINED_INIT_CONFIGURATION A_ : Optional[Any] = FunnelTokenizer A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ 
: int = 2 def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase="##" , **__UpperCAmelCase , ) -> int: super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , clean_text=__UpperCAmelCase , tokenize_chinese_chars=__UpperCAmelCase , strip_accents=__UpperCAmelCase , wordpieces_prefix=__UpperCAmelCase , **__UpperCAmelCase , ) _a = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , __UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , __UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , __UpperCAmelCase ) != tokenize_chinese_chars ): _a = getattr(__UpperCAmelCase , normalizer_state.pop('''type''' ) ) _a = do_lower_case _a = strip_accents _a = tokenize_chinese_chars _a = normalizer_class(**__UpperCAmelCase ) _a = do_lower_case def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Dict: _a = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: _a = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase )
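# Worked example of the token-type layout produced by the second method above:
# unlike BERT, Funnel gives the [CLS] token its own type id
# (cls_token_type_id == 2) instead of type 0. For a 3-token first sequence A
# and a 2-token second sequence B (token values are illustrative):
#
#   tokens:   [CLS]  A  A  A  [SEP]  B  B  [SEP]
#   type ids:   2    0  0  0    0    1  1    1
#
# i.e. len(cls) * [2] + len(A + sep) * [0] + len(B + sep) * [1].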
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: _a = parent _a = batch_size _a = encoder_seq_length _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = d_ff _a = relative_attention_num_buckets _a = dropout_rate _a = initializer_factor _a = eos_token_id _a = pad_token_id _a = decoder_start_token_id _a = None _a = decoder_layers def _UpperCAmelCase ( self ) -> Dict: return TaConfig.from_pretrained('''google/umt5-base''' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: _a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: _a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _UpperCAmelCase ( self ) -> Tuple: _a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = config.num_attention_heads _a = self.prepare_inputs_dict(__UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def _UpperCAmelCase ( self ) -> int: _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCAmelCase ( self ) -> Tuple: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self ) -> List[str]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) _a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) _a = result.last_hidden_state _a = result.past_key_values _a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(__UpperCAmelCase )['''last_hidden_state'''] _a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -1, random_slice_idx].detach() _a 
= output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() _a = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ : str = True A_ : List[str] = False A_ : List[Any] = False A_ : str = True A_ : List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ : Optional[Any] = [0.8, 0.9] def _UpperCAmelCase ( self ) -> Tuple: _a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() _a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _a = self.model_tester.prepare_config_and_inputs() _a = config_and_inputs[0] _a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) _a = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): _a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) _a = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we 
keep hitting edge cases.''' ) def _UpperCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) _a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) _a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off _a = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) _a = model.generate(input_ids.to(__UpperCAmelCase ) ) _a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _a = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
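# Small illustration of the pad-token clamp used in prepare_config_and_inputs
# above: shifting every id to at least pad_token_id + 1 guarantees no pad token
# appears inside the randomly generated sequences (values here are made up).
import torch

pad_token_id = 0
input_ids = torch.tensor([[0, 3, 0, 7]])
input_ids = input_ids.clamp(pad_token_id + 1)  # -> tensor([[1, 3, 1, 7]])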
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
"""simple docstring""" import os from pathlib import Path def A_ ( ): """simple docstring""" from torch.utils.cpp_extension import load _a = Path(_lowerCAmelCase ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr''' _a = [ root / filename for filename in [ '''vision.cpp''', os.path.join('''cpu''', '''ms_deform_attn_cpu.cpp''' ), os.path.join('''cuda''', '''ms_deform_attn_cuda.cu''' ), ] ] load( '''MultiScaleDeformableAttention''', _lowerCAmelCase, with_cuda=_lowerCAmelCase, extra_include_paths=[str(_lowerCAmelCase )], extra_cflags=['''-DWITH_CUDA=1'''], extra_cuda_cflags=[ '''-DCUDA_HAS_FP16=1''', '''-D__CUDA_NO_HALF_OPERATORS__''', '''-D__CUDA_NO_HALF_CONVERSIONS__''', '''-D__CUDA_NO_HALF2_OPERATORS__''', ], ) import MultiScaleDeformableAttention as MSDA return MSDA
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
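# Quick check of the property at the end of the config above, which reduces the
# convolutional strides into the overall input-to-logits downsampling factor.
# With the default strides the feature encoder emits one frame per 320 samples:
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
assert functools.reduce(operator.mul, conv_stride, 1) == 320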
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __snake_case = datasets.logging.get_logger(__name__) __snake_case = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' __snake_case = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. 
The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' __snake_case = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... 
\'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0} ''' def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Any=False, _lowerCAmelCase : int=True, _lowerCAmelCase : Any=False, _lowerCAmelCase : str="dummy_doc" ): """simple docstring""" _a = {doc: key_lines} _a = {doc: sys_lines} _a = {} _a = 0 _a = 0 _a = 0 _a = 0 _a = 0 _a = 0 _a , _a = reader.get_doc_mentions(_lowerCAmelCase, key_doc_lines[doc], _lowerCAmelCase ) key_singletons_num += singletons_num if NP_only or min_span: _a = reader.set_annotated_parse_trees(_lowerCAmelCase, key_doc_lines[doc], _lowerCAmelCase, _lowerCAmelCase ) _a , _a = reader.get_doc_mentions(_lowerCAmelCase, sys_doc_lines[doc], _lowerCAmelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _a = reader.set_annotated_parse_trees(_lowerCAmelCase, key_doc_lines[doc], _lowerCAmelCase, _lowerCAmelCase ) if remove_nested: _a , _a = reader.remove_nested_coref_mentions(_lowerCAmelCase, _lowerCAmelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _a , _a = reader.remove_nested_coref_mentions(_lowerCAmelCase, _lowerCAmelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _a = reader.get_mention_assignments(_lowerCAmelCase, _lowerCAmelCase ) _a = reader.get_mention_assignments(_lowerCAmelCase, _lowerCAmelCase ) _a = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( '''Number of removed nested coreferring mentions in the key ''' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( '''Number of resulting singleton clusters in the key ''' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' '''files, respectively''' ) return doc_coref_infos def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int], _lowerCAmelCase : Optional[int], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[int], _lowerCAmelCase : List[str] ): """simple docstring""" _a = get_coref_infos(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = {} _a = 0 _a = 0 for name, metric in metrics: _a , _a , _a = evaluator.evaluate_documents(_lowerCAmelCase, _lowerCAmelCase, beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ), f'Recall: {recall * 1_00:.2f}', f' Precision: {precision * 1_00:.2f}', f' F1: {fa * 1_00:.2f}', ) if conll_subparts_num == 3: _a = (conll / 3) * 1_00 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'''conll_score''': conll} ) return output_scores def A_ ( _lowerCAmelCase : int ): """simple 
docstring""" _a = False for line in key_lines: if not line.startswith('''#''' ): if len(line.split() ) > 6: _a = line.split()[5] if not parse_col == "-": _a = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> int: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Sequence(datasets.Value('''string''' ) ), } ) , codebase_urls=['''https://github.com/ns-moosavi/coval'''] , reference_urls=[ '''https://github.com/ns-moosavi/coval''', '''https://www.aclweb.org/anthology/P16-1060''', '''http://www.conll.cemantix.org/2012/data.html''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _a = [ ('''mentions''', evaluator.mentions), ('''muc''', evaluator.muc), ('''bcub''', evaluator.b_cubed), ('''ceafe''', evaluator.ceafe), ('''lea''', evaluator.lea), ] if min_span: _a = util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''' ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _a = evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } __snake_case = { '''google/rembert''': 256, } __snake_case = '''▁''' class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : List[Any] = RemBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = False if not self.vocab_file else True def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) ) return _a = os.path.join( 
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
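# Hedged usage sketch for the pair-building methods above. The ids are
# placeholders rather than RemBERT's real special-token ids; the point is the
# layout [CLS] A [SEP] B [SEP], with segment id 0 for the first span and 1 for
# the second, exactly as the two methods above encode it.
def demo_pair_layout(ids_a, ids_b, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids


assert demo_pair_layout([7, 8], [9]) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])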
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __snake_case = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : Union[str, Any] ): """simple docstring""" _a = DPTConfig(embedding_type='''hybrid''' ) if "large" in checkpoint_url: _a = 10_24 _a = 40_96 _a = 24 _a = 16 _a = [5, 11, 17, 23] _a = [2_56, 5_12, 10_24, 10_24] _a = (1, 3_84, 3_84) if "nyu" or "midas" in checkpoint_url: _a = 7_68 _a = [1, 1, 1, 0.5] _a = [2_56, 5_12, 7_68, 7_68] _a = 1_50 _a = 16 _a = (1, 3_84, 3_84) _a = False _a = '''project''' if "ade" in checkpoint_url: _a = True _a = 7_68 _a = [1, 1, 1, 0.5] _a = 1_50 _a = 16 _a = '''huggingface/label-files''' _a = '''ade20k-id2label.json''' _a = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase, _lowerCAmelCase, repo_type='''dataset''' ) ), '''r''' ) ) _a = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _a = idalabel _a = {v: k for k, v in idalabel.items()} _a = [1, 1_50, 4_80, 4_80] return config, expected_shape def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(_lowerCAmelCase, _lowerCAmelCase ) def A_ ( _lowerCAmelCase : Any ): """simple docstring""" if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): _a = name.replace('''pretrained.model''', '''dpt.encoder''' ) if "pretrained.model" in name: _a = name.replace('''pretrained.model''', '''dpt.embeddings''' ) if "patch_embed" in name: _a = name.replace('''patch_embed''', '''''' ) if "pos_embed" in name: _a = name.replace('''pos_embed''', '''position_embeddings''' ) if "attn.proj" in name: _a = name.replace('''attn.proj''', '''attention.output.dense''' ) if "proj" in name and "project" not in name: _a = name.replace('''proj''', '''projection''' ) if "blocks" in name: _a = name.replace('''blocks''', '''layer''' ) if "mlp.fc1" in name: _a = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: _a = name.replace('''mlp.fc2''', '''output.dense''' ) if "norm1" in name and "backbone" not in name: _a = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name and "backbone" not in name: _a = name.replace('''norm2''', '''layernorm_after''' ) if "scratch.output_conv" in name: _a = name.replace('''scratch.output_conv''', '''head''' ) if "scratch" in name: _a = name.replace('''scratch''', '''neck''' ) if "layer1_rn" in name: _a = name.replace('''layer1_rn''', '''convs.0''' ) if "layer2_rn" in name: _a = name.replace('''layer2_rn''', '''convs.1''' ) if "layer3_rn" in name: _a = name.replace('''layer3_rn''', '''convs.2''' ) if "layer4_rn" in name: _a = name.replace('''layer4_rn''', '''convs.3''' ) if "refinenet" in name: _a = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 _a = name.replace(f'refinenet{layer_idx}', f'fusion_stage.layers.{abs(layer_idx-4 )}' ) if "out_conv" in name: _a = name.replace('''out_conv''', '''projection''' ) if "resConfUnit1" in name: _a = name.replace('''resConfUnit1''', '''residual_layer1''' ) if "resConfUnit2" in name: _a = name.replace('''resConfUnit2''', 
'''residual_layer2''' ) if "conv1" in name: _a = name.replace('''conv1''', '''convolution1''' ) if "conv2" in name: _a = name.replace('''conv2''', '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: _a = name.replace('''pretrained.act_postprocess1.0.project.0''', '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: _a = name.replace('''pretrained.act_postprocess2.0.project.0''', '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: _a = name.replace('''pretrained.act_postprocess3.0.project.0''', '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: _a = name.replace('''pretrained.act_postprocess4.0.project.0''', '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: _a = name.replace('''pretrained.act_postprocess1.3''', '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: _a = name.replace('''pretrained.act_postprocess1.4''', '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: _a = name.replace('''pretrained.act_postprocess2.3''', '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: _a = name.replace('''pretrained.act_postprocess2.4''', '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: _a = name.replace('''pretrained.act_postprocess3.3''', '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: _a = name.replace('''pretrained.act_postprocess4.3''', '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: _a = name.replace('''pretrained.act_postprocess4.4''', '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: _a = name.replace('''pretrained''', '''dpt''' ) if "bn" in name: _a = name.replace('''bn''', '''batch_norm''' ) if "head" in name: _a = name.replace('''head''', '''head.head''' ) if "encoder.norm" in name: _a = name.replace('''encoder.norm''', '''layernorm''' ) if "auxlayer" in name: _a = name.replace('''auxlayer''', '''auxiliary_head.head''' ) if "backbone" in name: _a = name.replace('''backbone''', '''backbone.bit.encoder''' ) if ".." 
in name: _a = name.replace('''..''', '''.''' ) if "stem.conv" in name: _a = name.replace('''stem.conv''', '''bit.embedder.convolution''' ) if "blocks" in name: _a = name.replace('''blocks''', '''layers''' ) if "convolution" in name and "backbone" in name: _a = name.replace('''convolution''', '''conv''' ) if "layer" in name and "backbone" in name: _a = name.replace('''layer''', '''layers''' ) if "backbone.bit.encoder.bit" in name: _a = name.replace('''backbone.bit.encoder.bit''', '''backbone.bit''' ) if "embedder.conv" in name: _a = name.replace('''embedder.conv''', '''embedder.convolution''' ) if "backbone.bit.encoder.stem.norm" in name: _a = name.replace('''backbone.bit.encoder.stem.norm''', '''backbone.bit.embedder.norm''' ) return name def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : int ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _a = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' ) _a = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _a = in_proj_weight[: config.hidden_size, :] _a = in_proj_bias[: config.hidden_size] _a = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _a = in_proj_weight[ -config.hidden_size :, : ] _a = in_proj_bias[-config.hidden_size :] def A_ ( ): """simple docstring""" _a = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _a = Image.open(requests.get(_lowerCAmelCase, stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : List[str], _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : List[Any] ): """simple docstring""" _a , _a = get_dpt_config(_lowerCAmelCase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") _a = torch.load(_lowerCAmelCase, map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(_lowerCAmelCase ) # rename keys for key in state_dict.copy().keys(): _a = state_dict.pop(_lowerCAmelCase ) _a = val # read in qkv matrices read_in_q_k_v(_lowerCAmelCase, _lowerCAmelCase ) # load HuggingFace model _a = DPTForSemanticSegmentation(_lowerCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) model.eval() # Check outputs on an image _a = 4_80 if '''ade''' in checkpoint_url else 3_84 _a = DPTImageProcessor(size=_lowerCAmelCase ) _a = prepare_img() _a = image_processor(_lowerCAmelCase, return_tensors='''pt''' ) # forward pass _a = model(**_lowerCAmelCase ).logits if '''ade''' in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth if show_prediction: _a = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ), size=(image.size[1], image.size[0]), mode='''bicubic''', align_corners=_lowerCAmelCase, ) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 2_55 ).show() if pytorch_dump_folder_path is not None: Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(_lowerCAmelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(_lowerCAmelCase ) if push_to_hub: model.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' ) if 
__name__ == "__main__": __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''', type=str, help='''URL of the original DPT checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) parser.add_argument( '''--model_name''', default='''dpt-large''', type=str, help='''Name of the model, in case you\'re pushing to the hub.''', ) parser.add_argument( '''--show_prediction''', action='''store_true''', ) __snake_case = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __snake_case = '''pt''' elif is_tf_available(): __snake_case = '''tf''' else: __snake_case = '''jax''' class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = PerceiverTokenizer A_ : Optional[Any] = False def _UpperCAmelCase ( self ) -> Optional[int]: super().setUp() _a = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _UpperCAmelCase ( self ) -> int: return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=20 , __UpperCAmelCase=5 ) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _a = [] for i in range(len(__UpperCAmelCase ) ): try: _a = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _a = list(filter(lambda __UpperCAmelCase : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , __UpperCAmelCase ) ) _a = list(filter(lambda __UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCAmelCase ) , __UpperCAmelCase ) ) if max_length is not None and len(__UpperCAmelCase ) > max_length: _a = toks[:max_length] if min_length is not None and len(__UpperCAmelCase ) < min_length and len(__UpperCAmelCase ) > 0: while len(__UpperCAmelCase ) < min_length: _a = toks + toks # toks_str = [t[1] for t in toks] _a = [t[0] for t in toks] # Ensure consistency _a = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) if " " not in output_txt and len(__UpperCAmelCase ) > 1: _a = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCAmelCase ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCAmelCase ) ) if with_prefix_space: _a = ''' ''' + output_txt _a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) return output_txt, output_ids def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.perceiver_tokenizer _a = '''Unicode €.''' _a = tokenizer(__UpperCAmelCase ) _a = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , __UpperCAmelCase ) # decoding _a = tokenizer.decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , '''[CLS]Unicode €.[SEP]''' ) _a = tokenizer('''e è é ê ë''' ) _a = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , __UpperCAmelCase ) # decoding _a = tokenizer.decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê 
ë[SEP]''' ) def _UpperCAmelCase ( self ) -> Any: _a = self.perceiver_tokenizer _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off _a = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on _a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) if FRAMEWORK != "jax": _a = list(batch.input_ids.numpy()[0] ) else: _a = list(batch.input_ids.tolist()[0] ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def _UpperCAmelCase ( self ) -> int: _a = self.perceiver_tokenizer _a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , __UpperCAmelCase ) self.assertIn('''attention_mask''' , __UpperCAmelCase ) self.assertNotIn('''decoder_input_ids''' , __UpperCAmelCase ) self.assertNotIn('''decoder_attention_mask''' , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: _a = self.perceiver_tokenizer _a = [ '''Summary of the text.''', '''Another summary.''', ] _a = tokenizer( text_target=__UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def _UpperCAmelCase ( self ) -> Optional[int]: # safety check on max_len default value so we are sure the test works _a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test _a = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _a = tempfile.mkdtemp() _a = ''' He is very happy, UNwant\u00E9d,running''' _a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) tokenizer.save_pretrained(__UpperCAmelCase ) _a = tokenizer.__class__.from_pretrained(__UpperCAmelCase ) _a = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) shutil.rmtree(__UpperCAmelCase ) _a = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc _a = tempfile.mkdtemp() _a = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) _a = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) _a = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) tokenizer.save_pretrained(__UpperCAmelCase ) _a = tokenizer.__class__.from_pretrained(__UpperCAmelCase ) _a = after_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertIn('''new_additional_special_token''' , 
after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) _a = tokenizer.__class__.from_pretrained(__UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(__UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: _a = json.load(__UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: _a = json.load(__UpperCAmelCase ) _a = [F'<extra_id_{i}>' for i in range(125 )] _a = added_tokens_extra_ids + [ '''an_additional_special_token''' ] _a = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(__UpperCAmelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__UpperCAmelCase , __UpperCAmelCase ) with open(os.path.join(__UpperCAmelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(__UpperCAmelCase , __UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _a = tokenizer_class.from_pretrained( __UpperCAmelCase , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _a = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=__UpperCAmelCase )] _a = tokenizer_class.from_pretrained( __UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> List[Any]: pass def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Dict: # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _a = self.get_tokenizers(fast=__UpperCAmelCase , do_lower_case=__UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): _a = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] _a = tokenizer.convert_tokens_to_string(__UpperCAmelCase ) 
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
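# Hedged reconstruction of the id scheme the expected values in the tests
# above imply: the tokenizer works on raw utf-8 bytes, offset by the number of
# special tokens (6 here, with [CLS]=4 and [SEP]=5). Toy re-implementation for
# illustration only, not the real PerceiverTokenizer.
def toy_perceiver_encode(text):
    return [4] + [byte + 6 for byte in text.encode("utf-8")] + [5]


assert toy_perceiver_encode("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]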
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCamelCase ( a__ ): '''simple docstring''' @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers 
import BertConfig, BertModel, BertTokenizer ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Tuple: _a = ''' from transformers import pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' _a = self.get_env() _a = '''1''' _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: _a = ''' from transformers import AutoModel ''' _a = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
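# The socket monkey-patch these tests inject into the subprocess, shown
# standalone (hedged: simplified, and it affects only the current process).
# Once applied, any attempt to open a connection fails fast, which is how the
# tests simulate a machine without network access.
import socket


def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")


socket.socket = offline_socket  # from here on, network calls raise immediately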
"""simple docstring""" import os def A_ ( _lowerCAmelCase : str = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(_lowerCAmelCase ), _lowerCAmelCase ) ) as input_file: _a = [ [int(_lowerCAmelCase ) for element in line.split(''',''' )] for line in input_file.readlines() ] _a = len(_lowerCAmelCase ) _a = len(matrix[0] ) _a = [[-1 for _ in range(_lowerCAmelCase )] for _ in range(_lowerCAmelCase )] for i in range(_lowerCAmelCase ): _a = matrix[i][0] for j in range(1, _lowerCAmelCase ): for i in range(_lowerCAmelCase ): _a = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1, _lowerCAmelCase ): _a = min( minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2, -1, -1 ): _a = min( minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'{solution() = }')
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
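# Hedged sketch of what requires_backends does for the dummy classes above:
# any attempt to build a flax object without flax installed fails immediately
# with a readable message instead of a deep stack trace. Simplified stand-in,
# not the real transformers.utils implementation.
import importlib.util


def toy_requires_backends(obj, backends):
    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the missing backend(s): {', '.join(missing)}")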
"""simple docstring""" import re def A_ ( _lowerCAmelCase : str ): """simple docstring""" _a = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(_lowerCAmelCase, _lowerCAmelCase ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator('''+918827897895'''))
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __snake_case = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __snake_case = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' __snake_case = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" def remove_articles(_lowerCAmelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE ) return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase : Tuple ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase : Tuple ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase : List[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ): """simple docstring""" _a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00 def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ): """simple docstring""" _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(_lowerCAmelCase ) _a = Counter(_lowerCAmelCase ) _a = Counter() for sgram, scount in sgramcounter.items(): _a = scount * numref _a = Counter(_lowerCAmelCase ) _a = Counter() for cgram, ccount in 
cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(_lowerCAmelCase ) > 0: _a = deltmpscorea / len(_lowerCAmelCase ) # ADDITION _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ): """simple docstring""" _a = len(_lowerCAmelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ): """simple docstring""" if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase ) else: _a 
= sentence if not return_str: _a = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ): """simple docstring""" if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] ) _a = sari_score / len(_lowerCAmelCase ) return 1_00 * sari_score def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ): """simple docstring""" _a = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] _a = sacrebleu.corpus_bleu( _lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = {} result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) return result
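# Hedged demo of the exact-match normalization chain above: lowercase, strip
# punctuation, drop articles, collapse whitespace, in that order. Standalone
# re-implementation for illustration; it mirrors the steps, not the metric itself.
import re
import string


def toy_normalize(text):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


assert toy_normalize("The  quick, brown fox!") == "quick brown fox"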
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __snake_case = logging.get_logger(__name__) def A_ ( ): """simple docstring""" _a = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. _a = json.loads(_lowerCAmelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. _a = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". _a = json.loads(_lowerCAmelCase ) if not mpi_options.get('''sagemaker_mpi_enabled''', _lowerCAmelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : str = field( default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , ) def _UpperCAmelCase ( self ) -> List[Any]: super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , __UpperCAmelCase , ) @cached_property def _UpperCAmelCase ( self ) -> "torch.device": logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: _a = torch.device('''cpu''' ) _a = 0 elif is_sagemaker_model_parallel_available(): _a = smp.local_rank() _a = torch.device('''cuda''' , __UpperCAmelCase ) _a = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) _a = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) _a = torch.device('''cuda''' , self.local_rank ) _a = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 _a = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. _a = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) _a = torch.device('''cuda''' , self.local_rank ) _a = 1 if device.type == "cuda": torch.cuda.set_device(__UpperCAmelCase ) return device @property def _UpperCAmelCase ( self ) -> int: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def _UpperCAmelCase ( self ) -> Dict: return not is_sagemaker_model_parallel_available() @property def _UpperCAmelCase ( self ) -> str: return False
320
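The model-parallel availability check above boils down to parsing two JSON-encoded environment variables and inspecting specific keys. A stdlib-only sketch of that logic (the helper name is illustrative; only the env-var names and keys come from the snippet):

import json
import os

def sagemaker_mp_flags_present() -> bool:
    """Mirror of the env-var checks: both JSON blobs must parse and carry
    the expected keys before model parallelism is considered enabled."""
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        has_partitions = "partitions" in json.loads(smp_options)
        mpi_enabled = json.loads(mpi_options).get("sagemaker_mpi_enabled", False)
    except json.JSONDecodeError:
        return False
    return has_partitions and mpi_enabled

os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'
print(sagemaker_mp_flags_present())  # True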
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 50 ): """simple docstring""" _a = [1] * (length + 1) for row_length in range(3, length + 1 ): for block_length in range(3, row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
320
1
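Assuming the fixed solution above, two quick sanity checks of the recurrence: a row of length 3 admits 2 fillings (empty, or one 3-block), and the Project Euler 114 statement gives exactly 17 fillings for a row of length 7.

# Assuming solution() as defined above.
print(solution(3))  # 2
print(solution(7))  # 17, per the Project Euler 114 statement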
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Optional[int]: _a = inspect.getfile(accelerate.test_utils ) _a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) _a = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) _a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[str]: print(F'Found {torch.cuda.device_count()} devices.' ) _a = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[str]: print(F'Found {torch.cuda.device_count()} devices.' ) _a = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(F'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> List[str]: _a = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) @require_multi_gpu def _UpperCAmelCase ( self ) -> Any: print(F'Found {torch.cuda.device_count()} devices, using 2 devices only' ) _a = ['''torchrun''', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": __snake_case = Accelerator() __snake_case = (accelerator.state.process_index + 2, 10) __snake_case = torch.randint(0, 10, shape).to(accelerator.device) __snake_case = '''''' __snake_case = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." __snake_case = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." __snake_case = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
320
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __snake_case = logging.get_logger('''transformers.models.speecht5''') __snake_case = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } __snake_case = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } __snake_case = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } __snake_case = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } __snake_case = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } __snake_case = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } __snake_case = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', 
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } __snake_case = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __snake_case = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = [] __snake_case = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ): """simple docstring""" for attribute in key.split('''.''' ): _a = getattr(_lowerCAmelCase, _lowerCAmelCase ) if weight_type is not None: _a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + 
"." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value else: _a = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ): """simple docstring""" _a = [] if task == "s2t": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2T _a = IGNORE_KEYS_S2T elif task == "t2s": _a = None _a = MAPPING_T2S _a = IGNORE_KEYS_T2S elif task == "s2s": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2S _a = IGNORE_KEYS_S2S else: raise ValueError(f'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(_lowerCAmelCase, _lowerCAmelCase ): logger.info(f'{name} was ignored' ) continue _a = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) _a = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: _a = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _a = True if "*" in mapped_key: _a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2] _a = mapped_key.replace('''*''', _lowerCAmelCase ) if "weight_g" in name: _a = '''weight_g''' elif "weight_v" in name: _a = '''weight_v''' elif "bias" in name: _a = '''bias''' elif "weight" in name: _a = '''weight''' elif "running_mean" in name: _a = '''running_mean''' elif "running_var" in name: _a = '''running_var''' elif "num_batches_tracked" in name: _a = '''num_batches_tracked''' else: _a = None set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ): """simple docstring""" _a = full_name.split('''conv_layers.''' )[-1] _a = name.split('''.''' ) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ): """simple docstring""" if config_path is not None: _a = SpeechTaConfig.from_pretrained(_lowerCAmelCase ) else: _a = SpeechTaConfig() if task == "s2t": _a = config.max_text_positions _a = SpeechTaForSpeechToText(_lowerCAmelCase ) elif task == "t2s": _a = 18_76 _a = 6_00 _a = config.max_speech_positions _a = SpeechTaForTextToSpeech(_lowerCAmelCase ) elif task == "s2s": _a = 18_76 _a = config.max_speech_positions _a = SpeechTaForSpeechToSpeech(_lowerCAmelCase ) else: raise ValueError(f'Unknown task name: {task}' ) if vocab_path: _a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase ) _a = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _a = SpeechTaFeatureExtractor() _a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) _a = torch.load(_lowerCAmelCase ) recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(_lowerCAmelCase ) model.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. 
Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
320
1
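The conversion loop above resolves `*` wildcards in mapping keys by splitting on `.*.` and substituting the layer index recovered from the fairseq parameter name. A self-contained sketch of that substitution (a simplified stand-in, not the converter's exact logic):

from typing import Optional

# One entry of the (real) mapping table, used here for illustration.
MAPPING = {
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
}

def map_name(fairseq_name: str) -> Optional[str]:
    for key, mapped_key in MAPPING.items():
        prefix, suffix = key.split(".*.")
        if fairseq_name.startswith(prefix) and suffix in fairseq_name:
            # Recover the layer index sitting between prefix and suffix.
            layer_index = fairseq_name[len(prefix):].split(".")[1]
            return mapped_key.replace("*", layer_index)
    return None

print(map_name("encoder.layers.3.fc1.weight"))
# speecht5.encoder.wrapped_encoder.layers.3.feed_forward.intermediate_dense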
"""simple docstring""" from PIL import Image def A_ ( _lowerCAmelCase : Image, _lowerCAmelCase : int ): """simple docstring""" _a = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level)) def contrast(_lowerCAmelCase : int ) -> int: return int(1_28 + factor * (c - 1_28) ) return img.point(_lowerCAmelCase ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 __snake_case = change_contrast(img, 170) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
320
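For level 170 the factor above works out to 259 * 425 / (255 * 89) ≈ 4.85: mid-grey (128) is a fixed point and values away from it are pushed outward (out-of-range results end up clipped to 0..255 by PIL's 8-bit modes when the point table is applied). The arithmetic alone, no image needed:

# Contrast factor math only.
level = 170
factor = (259 * (level + 255)) / (255 * (259 - level))
contrast = lambda c: int(128 + factor * (c - 128))
print(round(factor, 3))                              # 4.85
print(contrast(128), contrast(100), contrast(200))   # 128 -7 477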
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'decision_transformer' A_ : Union[str, Any] = ['past_key_values'] A_ : str = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]: _a = state_dim _a = act_dim _a = hidden_size _a = max_ep_len _a = action_tanh _a = vocab_size _a = n_positions _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = scale_attn_by_inverse_layer_idx _a = reorder_and_upcast_attn _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
320
1
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __snake_case = logging.get_logger(__name__) __snake_case = {} __snake_case = {} __snake_case = {} def A_ ( _lowerCAmelCase : type, _lowerCAmelCase : Optional[str], _lowerCAmelCase : Optional[List[str]] = None, ): """simple docstring""" _a = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) _a = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) _a = format_type def A_ ( _lowerCAmelCase : Exception, _lowerCAmelCase : Optional[str], _lowerCAmelCase : Optional[List[str]] = None ): """simple docstring""" _a = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _a = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __snake_case = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __snake_case = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __snake_case = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def A_ ( _lowerCAmelCase : Optional[str] ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def A_ ( _lowerCAmelCase : Optional[str], **_lowerCAmelCase : Dict ): """simple docstring""" _a = get_format_type_from_alias(_lowerCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**_lowerCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
320
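The module above is a registry-with-aliases: available formatters register under a canonical type plus aliases, and unavailable backends register an error that is raised lazily on first use rather than at import time. A minimal standalone sketch of the same pattern (all names illustrative):

_FORMATTERS = {}
_ALIASES = {}
_UNAVAILABLE = {}

def register(cls, format_type, aliases=()):
    _FORMATTERS[format_type] = cls
    for alias in set(tuple(aliases) + (format_type,)):
        _ALIASES[alias] = format_type

def register_unavailable(error, format_type, aliases=()):
    for alias in set(tuple(aliases) + (format_type,)):
        _UNAVAILABLE[alias] = error

def get_formatter(name, **kwargs):
    name = _ALIASES.get(name, name)
    if name in _FORMATTERS:
        return _FORMATTERS[name](**kwargs)
    if name in _UNAVAILABLE:
        raise _UNAVAILABLE[name]   # raise the stored error lazily
    raise ValueError(f"unknown format type {name!r}")

class NumpyFormatter: ...
register(NumpyFormatter, "numpy", aliases=["np"])
register_unavailable(ValueError("PyTorch is not installed."), "torch", aliases=["pt"])
print(type(get_formatter("np")).__name__)  # NumpyFormatter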
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''shortest_edge''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _a = image_std if image_std is not None else OPENAI_CLIP_STD _a = do_convert_rgb def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _a = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _a = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
_a = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: _a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
320
1
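The preprocess pipeline above applies resize, center crop, rescale, and normalize in that fixed order; the last two are plain per-pixel arithmetic. A numpy-only sketch using the OpenAI CLIP mean/std constants referenced above:

import numpy as np

# Hypothetical 2x2 RGB image, uint8, channels-last.
image = np.array([[[0, 128, 255]] * 2] * 2, dtype=np.uint8)

rescaled = image * (1 / 255)                       # do_rescale: bytes -> [0, 1]
mean = np.array([0.48145466, 0.4578275, 0.40821073])   # OPENAI_CLIP_MEAN
std = np.array([0.26862954, 0.26130258, 0.27577711])   # OPENAI_CLIP_STD
normalized = (rescaled - mean) / std               # do_normalize, per channel
print(normalized[0, 0].round(3))                   # approx [-1.792  0.169  2.146]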
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Tuple = 'cvt' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 192, 384] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , **__UpperCAmelCase , ) -> Dict: super().__init__(**__UpperCAmelCase ) _a = num_channels _a = patch_sizes _a = patch_stride _a = patch_padding _a = embed_dim _a = num_heads _a = depth _a = mlp_ratio _a = attention_drop_rate _a = drop_rate _a = drop_path_rate _a = qkv_bias _a = cls_token _a = qkv_projection_method _a = kernel_qkv _a = padding_kv _a = stride_kv _a = padding_q _a = stride_q _a = initializer_range _a = layer_norm_eps
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float ): """simple docstring""" if inductance <= 0: raise ValueError('''Inductance cannot be 0 or negative''' ) elif capacitance <= 0: raise ValueError('''Capacitance cannot be 0 or negative''' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
320
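With the names fixed as above, a quick check: L = 10 H and C = 5 F give 1/(2π√50) ≈ 0.0225 Hz.

# Assuming resonant_frequency as defined above.
print(resonant_frequency(inductance=10, capacitance=5))
# ('Resonant frequency', 0.022507907903927652)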
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def A_ ( ): """simple docstring""" _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A_ ( ): """simple docstring""" _a = '''rougeLsum''' _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] assert score > score_no_sep def A_ ( ): """simple docstring""" _a = ['''rouge1''', '''rouge2''', '''rougeL'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) assert score_sep == score_no_sep def A_ ( ): """simple docstring""" _a = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _a = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _a = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A_ ( ): """simple docstring""" _a = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
320
1
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __snake_case = logging.get_logger(__name__) __snake_case = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[int] = 'deformable_detr' A_ : Any = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=300 , __UpperCAmelCase=1024 , __UpperCAmelCase=6 , __UpperCAmelCase=1024 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=1024 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=False , __UpperCAmelCase=300 , __UpperCAmelCase=False , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.25 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> List[str]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) _a = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = backbone_config.get('''model_type''' ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__UpperCAmelCase ) _a = use_timm_backbone _a = backbone_config _a = num_channels _a = num_queries _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = init_xavier_std _a = encoder_layerdrop _a = auxiliary_loss _a = position_embedding_type _a = backbone _a = use_pretrained_backbone _a = dilation # deformable attributes _a = num_feature_levels _a = encoder_n_points _a = decoder_n_points _a = two_stage _a = two_stage_num_proposals _a = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher _a = class_cost _a = bbox_cost _a = giou_cost # Loss coefficients _a = mask_loss_coefficient _a = dice_loss_coefficient _a = bbox_loss_coefficient _a = giou_loss_coefficient _a = eos_coefficient _a = focal_alpha _a = disable_custom_kernels super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase ) @property def _UpperCAmelCase ( self ) -> int: return self.encoder_attention_heads @property def _UpperCAmelCase ( self ) -> int: return self.d_model def _UpperCAmelCase ( self ) -> str: _a = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _a = self.backbone_config.to_dict() _a = self.__class__.model_type return output
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
320
1
"""simple docstring""" import requests __snake_case = '''YOUR API KEY''' def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str = giphy_api_key ): """simple docstring""" _a = '''+'''.join(query.split() ) _a = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}' _a = requests.get(_lowerCAmelCase ).json()['''data'''] return [gif["url"] for gif in gifs] if __name__ == "__main__": print('''\n'''.join(get_gifs('''space ship''')))
320
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
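The function solves for whichever quantity is passed as zero; assuming the fixed signature above:

# Assuming shear_stress as defined above.
print(shear_stress(stress=25, tangential_force=100, area=0))    # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))  # ('stress', 8.0)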
"""simple docstring""" from functools import reduce __snake_case = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def A_ ( _lowerCAmelCase : str = N ): """simple docstring""" return max( # mypy cannot properly interpret reduce int(reduce(lambda _lowerCAmelCase, _lowerCAmelCase : str(int(_lowerCAmelCase ) * int(_lowerCAmelCase ) ), n[i : i + 13] ) ) for i in range(len(_lowerCAmelCase ) - 12 ) ) if __name__ == "__main__": print(f'{solution() = }')
320
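A sanity check, assuming the fixed solution above: the greatest product of 13 adjacent digits in the 1000-digit number is 23514624000, the published Project Euler 8 answer.

# Assuming solution and the digit string N as defined above.
print(solution())  # 23514624000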
"""simple docstring""" def A_ ( ): """simple docstring""" _a = [] _a = 1 while len(_lowerCAmelCase ) < 1e6: constant.append(str(_lowerCAmelCase ) ) i += 1 _a = ''''''.join(_lowerCAmelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
320
1
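Assuming the fixed solution above, the product d(1)·d(10)·d(100)·...·d(1000000) of the digits of Champernowne's constant is 210 (Project Euler 40).

# Assuming solution as defined above.
print(solution())  # 210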
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['transformers', 'torch', 'note_seq'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
320
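DummyObject is a metaclass that turns any attribute access on the placeholder class into a helpful error about missing backends, so the dummy can be imported freely but not used. A minimal self-contained sketch of the idea (not the transformers implementation; the class name is hypothetical):

class DummyObject(type):
    """Metaclass: attribute access on the class raises immediately, pointing
    at the missing backends, instead of failing later in obscure ways."""
    def __getattr__(cls, name):
        raise ImportError(
            f"{cls.__name__} requires backends {cls._backends}, which are not installed."
        )

class MidiProcessor(metaclass=DummyObject):  # hypothetical dummy class
    _backends = ["transformers", "torch", "note_seq"]

try:
    MidiProcessor.from_pretrained("some/checkpoint")
except ImportError as e:
    print(e)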
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = 'bart' A_ : Optional[Any] = ['past_key_values'] A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: _a = vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = classifier_dropout _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): _a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a = {0: '''batch'''} _a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''decoder_sequence'''} _a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__UpperCAmelCase , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape _a = common_inputs['''decoder_input_ids'''].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__UpperCAmelCase , __UpperCAmelCase ) _a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers _a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs['''attention_mask'''].dtype _a = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": _a = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.task in ["default", "seq2seq-lm"]: _a = 
super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: _a = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
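# A minimal, hedged sketch (separate from the config class above) of the
# per-layer cache shapes built by the seq2seq dummy-input generation: each
# decoder layer caches self-attention and cross-attention key/value tensors.
# All sizes below (batch=2, heads=4, hidden=64, lengths) are illustrative
# assumptions, not values from the original config.
import torch

batch, num_heads, hidden_size = 2, 4, 64
decoder_past_length, encoder_seq_length = 9, 16
decoder_shape = (batch, num_heads, decoder_past_length, hidden_size // num_heads)
encoder_shape = (batch, num_heads, encoder_seq_length, hidden_size // num_heads)
past_key_values = [
    (
        torch.zeros(decoder_shape),  # decoder self-attention key
        torch.zeros(decoder_shape),  # decoder self-attention value
        torch.zeros(encoder_shape),  # cross-attention key
        torch.zeros(encoder_shape),  # cross-attention value
    )
    for _ in range(6)  # one tuple per min(num_encoder_layers, num_decoder_layers)
]
assert past_key_values[0][0].shape == (2, 4, 9, 16)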
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''', [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ], ) def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Union[str, Any] ): """simple docstring""" _a = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) _a = DatasetInfosDict.from_directory(_lowerCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''', [ DatasetInfo(), DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ), ], ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : DatasetInfo ): """simple docstring""" _a = str(_lowerCAmelCase ) dataset_info.write_to_directory(_lowerCAmelCase ) _a = DatasetInfo.from_directory(_lowerCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_lowerCAmelCase, '''dataset_info.json''' ) ) def A_ ( ): """simple docstring""" _a = DatasetInfo( description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, ) _a = dataset_info._to_yaml_dict() assert sorted(_lowerCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _a = yaml.safe_dump(_lowerCAmelCase ) _a = yaml.safe_load(_lowerCAmelCase ) assert dataset_info_yaml_dict == reloaded def A_ ( ): """simple docstring""" _a = DatasetInfo() _a = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''', [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=13_37 ), } ), ], ) def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : DatasetInfosDict ): """simple docstring""" _a = str(_lowerCAmelCase ) dataset_infos_dict.write_to_directory(_lowerCAmelCase ) _a = 
DatasetInfosDict.from_directory(_lowerCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _a = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _a = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_lowerCAmelCase, '''README.md''' ) )
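# Hedged usage sketch (separate from the tests above): the same round-trip the
# parametrized test exercises, written out for a single config; "my_config" and
# the temporary path are illustrative.
import tempfile

from datasets.info import DatasetInfo, DatasetInfosDict

with tempfile.TemporaryDirectory() as tmp_dir:
    infos = DatasetInfosDict({"my_config": DatasetInfo(dataset_size=42)})
    infos.write_to_directory(tmp_dir)  # writes the dataset_info YAML block into README.md
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
    assert reloaded["my_config"].dataset_size == 42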
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def A_ ( _lowerCAmelCase : str ): """simple docstring""" for char in word: _a = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = set() for token in tokens: _a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _a = list(_lowerCAmelCase ) return word_list def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens _a = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _a = bert_tokens _a , _a = 0, len(_lowerCAmelCase ) while start < end: _a = True if is_chinese(bert_word[start] ): _a = min(end - start, _lowerCAmelCase ) for i in range(_lowerCAmelCase, 1, -1 ): _a = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _a = '''##''' + bert_word[j] _a = start + i _a = False break if single_word: start += 1 return bert_word def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ): """simple docstring""" _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws _a = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ): _a = [] for id in input_ids: _a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase ) _a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _a = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def A_ ( _lowerCAmelCase : Any ): """simple docstring""" with open(args.file_name, '''r''', encoding='''utf-8''' ) as f: _a = f.readlines() _a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _a = LTP(args.ltp ) # faster in GPU device _a = BertTokenizer.from_pretrained(args.bert ) _a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) with open(args.save_path, '''w''', encoding='''utf-8''' ) as f: _a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __snake_case = parser.parse_args() main(args)
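# Illustrative sketch (separate from the script above): add_sub_symbol marks
# BERT pieces that fall inside an LTP-segmented Chinese word with a "##"
# prefix, and prepare_ref then records the positions of those pieces. The toy
# tokens below are assumptions for demonstration only.
def _toy_ref_positions(tokens):
    # mirror of the position-collection loop above, minus the CJK range check
    return [i for i, tok in enumerate(tokens) if tok.startswith("##") and len(tok) == 3]

print(_toy_ref_positions(["北", "##京", "欢", "##迎", "你"]))  # -> [1, 3]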
"""simple docstring""" __snake_case = { '''Pillow''': '''Pillow<10.0.0''', '''accelerate''': '''accelerate>=0.20.3''', '''av''': '''av==9.2.0''', '''beautifulsoup4''': '''beautifulsoup4''', '''black''': '''black~=23.1''', '''codecarbon''': '''codecarbon==1.2.0''', '''cookiecutter''': '''cookiecutter==1.7.3''', '''dataclasses''': '''dataclasses''', '''datasets''': '''datasets!=2.5.0''', '''decord''': '''decord==0.6.0''', '''deepspeed''': '''deepspeed>=0.9.3''', '''diffusers''': '''diffusers''', '''dill''': '''dill<0.3.5''', '''evaluate''': '''evaluate>=0.2.0''', '''fairscale''': '''fairscale>0.3''', '''faiss-cpu''': '''faiss-cpu''', '''fastapi''': '''fastapi''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1,<=0.7.0''', '''ftfy''': '''ftfy''', '''fugashi''': '''fugashi>=1.0''', '''GitPython''': '''GitPython<3.1.19''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''', '''importlib_metadata''': '''importlib_metadata''', '''ipadic''': '''ipadic>=1.0.0,<2.0''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''', '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''', '''jieba''': '''jieba''', '''kenlm''': '''kenlm''', '''keras-nlp''': '''keras-nlp>=0.3.1''', '''librosa''': '''librosa''', '''nltk''': '''nltk''', '''natten''': '''natten>=0.14.6''', '''numpy''': '''numpy>=1.17''', '''onnxconverter-common''': '''onnxconverter-common''', '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''', '''onnxruntime''': '''onnxruntime>=1.4.0''', '''opencv-python''': '''opencv-python''', '''optuna''': '''optuna''', '''optax''': '''optax>=0.0.8,<=0.1.4''', '''packaging''': '''packaging>=20.0''', '''parameterized''': '''parameterized''', '''phonemizer''': '''phonemizer''', '''protobuf''': '''protobuf''', '''psutil''': '''psutil''', '''pyyaml''': '''pyyaml>=5.1''', '''pydantic''': '''pydantic<2''', '''pytest''': '''pytest>=7.2.0''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''python''': '''python>=3.8.0''', '''ray[tune]''': '''ray[tune]''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''', '''rjieba''': '''rjieba''', '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''', '''ruff''': '''ruff>=0.0.241,<=0.0.259''', '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''', '''sacremoses''': '''sacremoses''', '''safetensors''': '''safetensors>=0.3.1''', '''sagemaker''': '''sagemaker>=2.31.0''', '''scikit-learn''': '''scikit-learn''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''sigopt''': '''sigopt''', '''starlette''': '''starlette''', '''sudachipy''': '''sudachipy>=0.6.6''', '''sudachidict_core''': '''sudachidict_core>=20220729''', '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''', '''tensorflow''': '''tensorflow>=2.6,<2.14''', '''tensorflow-text''': '''tensorflow-text<2.14''', '''tf2onnx''': '''tf2onnx''', '''timeout-decorator''': '''timeout-decorator''', '''timm''': '''timm''', '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''', '''torch''': '''torch>=1.9,!=1.12.0''', '''torchaudio''': '''torchaudio''', '''torchvision''': '''torchvision''', '''pyctcdecode''': '''pyctcdecode>=0.4.0''', '''tqdm''': '''tqdm>=4.27''', '''unidic''': '''unidic>=1.0.2''', '''unidic_lite''': '''unidic_lite>=1.0.7''', '''urllib3''': '''urllib3<2.0.0''', '''uvicorn''': '''uvicorn''', }
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
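# Minimal sketch (separate from the config class above) of the attention-mask
# extension performed when use_past is set: the mask must also cover the cached
# past tokens. Sizes are illustrative assumptions.
import torch

batch, seqlen = 2, 5
attention_mask = torch.ones(batch, seqlen, dtype=torch.int64)
past_key_values_length = seqlen + 2  # same "+ 2" offset used above
extended_mask = torch.cat(
    [attention_mask, torch.ones(batch, past_key_values_length, dtype=attention_mask.dtype)],
    dim=1,
)
assert extended_mask.shape == (batch, seqlen + past_key_values_length)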
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name __snake_case = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : Any, _lowerCAmelCase : int=8 ): """simple docstring""" _a = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _a = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def A_ ( _lowerCAmelCase : int, _lowerCAmelCase : str=5_12, _lowerCAmelCase : int=5_12 ): """simple docstring""" _a = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1 ) _a = np.array(pil_image.convert('''RGB''' ) ) _a = arr.astype(np.floataa ) / 1_2_7.5 - 1 _a = np.transpose(_lowerCAmelCase, [2, 0, 1] ) _a = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ) return image class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: super().__init__() self.register_modules( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , ) _a = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: # get the original timestep using init_timestep _a = min(int(num_inference_steps * strength ) , __UpperCAmelCase ) _a = max(num_inference_steps - init_timestep , 0 ) _a = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> int: if not isinstance(__UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCAmelCase )}' ) _a = image.to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) _a = batch_size * num_images_per_prompt if image.shape[1] == 4: _a = image else: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != batch_size: raise ValueError( F'You 
have passed a list of generators of length {len(__UpperCAmelCase )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCAmelCase ) ] _a = torch.cat(__UpperCAmelCase , dim=0 ) else: _a = self.movq.encode(__UpperCAmelCase ).latent_dist.sample(__UpperCAmelCase ) _a = self.movq.config.scaling_factor * init_latents _a = torch.cat([init_latents] , dim=0 ) _a = init_latents.shape _a = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase ) # get latents _a = self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = init_latents return latents def _UpperCAmelCase ( self , __UpperCAmelCase=0 ) -> Dict: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) _a = torch.device(F'cuda:{gpu_id}' ) _a = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(__UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=0 ) -> str: if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' ) _a = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=__UpperCAmelCase ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _a = None for cpu_offloaded_model in [self.unet, self.movq]: _a , _a = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase ) # We'll offload the last model manually. 
_a = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _UpperCAmelCase ( self ) -> Union[str, Any]: if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCAmelCase , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 512 , __UpperCAmelCase = 512 , __UpperCAmelCase = 100 , __UpperCAmelCase = 4.0 , __UpperCAmelCase = 0.3 , __UpperCAmelCase = 1 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , ) -> str: _a = self._execution_device _a = guidance_scale > 1.0 if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = torch.cat(__UpperCAmelCase , dim=0 ) _a = image_embeds.shape[0] if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = torch.cat(__UpperCAmelCase , dim=0 ) if do_classifier_free_guidance: _a = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) _a = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 ) _a = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase ) if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = [image] if not all(isinstance(__UpperCAmelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'Input is in incorrect format: {[type(__UpperCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor' ) _a = torch.cat([prepare_image(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for i in image] , dim=0 ) _a = image.to(dtype=image_embeds.dtype , device=__UpperCAmelCase ) _a = self.movq.encode(__UpperCAmelCase )['''latents'''] _a = latents.repeat_interleave(__UpperCAmelCase , dim=0 ) self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase ) _a , _a = self.get_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = timesteps[:1].repeat(batch_size * num_images_per_prompt ) _a , _a = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor ) _a = self.prepare_latents( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase ) for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance _a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _a = {'''image_embeds''': image_embeds} _a = self.unet( sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] if do_classifier_free_guidance: _a , _a = noise_pred.split(latents.shape[1] , dim=1 ) _a , _a = noise_pred.chunk(2 ) _a , _a = variance_pred.chunk(2 ) _a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _a = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _a , _a = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _a = self.scheduler.step( __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0] # post-processing _a = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: _a = image * 0.5 + 0.5 _a = image.clamp(0 , 1 ) _a = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": _a = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCAmelCase )
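# Worked example (separate from the pipeline above) of the rounding done by the
# downscale helper near the top of this file: sides are snapped up to a whole
# number of latent units (scale_factor**2 pixels each), then multiplied back by
# scale_factor.
def snap_up(side: int, scale_factor: int = 8) -> int:
    new_side = side // scale_factor**2
    if side % scale_factor**2 != 0:
        new_side += 1
    return new_side * scale_factor

print(snap_up(768))  # 96: 768/64 = 12 latent units -> 12 * 8
print(snap_up(531))  # 72: 531/64 = 8.3 -> rounded up to 9 -> 9 * 8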
"""simple docstring""" import os import sys import unittest __snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = {'''BertModelTest''': '''BertModelTester'''} _a = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
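# Hedged usage sketch: the helpers exercised above live in utils/get_test_info.py
# of the transformers repo and take a path to a test module, e.g.
#
#     from get_test_info import get_test_to_tester_mapping
#     mapping = get_test_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#     # per the assertions above, this maps BertModelTest to BertModelTester
#     # (as classes; get_test_info.to_json renders them as names).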
"""simple docstring""" import mpmath # for roots of unity import numpy as np class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]: # Input as list _a = list(poly_a or [0] )[:] _a = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _a = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _a = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _a = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _a = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _a = self.__multiply() def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _a = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(__UpperCAmelCase ) <= 1: return dft[0] # _a = self.c_max_length // 2 while next_ncol > 0: _a = [[] for i in range(__UpperCAmelCase )] _a = self.root**next_ncol # First half of next step _a = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__UpperCAmelCase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _a = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__UpperCAmelCase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _a = new_dft _a = next_ncol // 2 return dft[0] def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.__dft('''A''' ) _a = self.__dft('''B''' ) _a = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _a = 2 while next_ncol <= self.c_max_length: _a = [[] for i in range(__UpperCAmelCase )] _a = self.root ** (next_ncol // 2) _a = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _a = new_inverse_c next_ncol *= 2 # Unpack _a = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self ) -> Dict: _a = '''A = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _a = '''B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _a = '''A*B = ''' + ''' + '''.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product ) ) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
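# Hedged usage sketch mirroring the slow tests above; running it downloads the
# large SAM checkpoint, so it is left as an illustration.
#
#     from transformers import pipeline
#
#     image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
#     outputs = image_segmenter(
#         "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
#     )
#     # outputs["masks"] holds the binary masks, outputs["scores"] their scores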
"""simple docstring""" import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , ) -> List[Any]: _a = size if size is not None else {'''height''': 18, '''width''': 18} _a = parent _a = batch_size _a = num_channels _a = image_size _a = min_resolution _a = max_resolution _a = do_resize _a = size _a = do_normalize _a = image_mean _a = image_std def _UpperCAmelCase ( self ) -> Any: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[int] = DPTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = DPTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) ) def _UpperCAmelCase ( self ) -> Any: _a = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) _a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _UpperCAmelCase ( self ) -> Union[str, Any]: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _UpperCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _a = 
prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _UpperCAmelCase ( self ) -> List[Any]: # Initialize image_processing _a = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input _a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched _a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
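# Hedged usage sketch (separate from the tests above) of the pattern they
# exercise, on a synthetic image; assumes torch, PIL and transformers are
# installed.
import numpy as np
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 30, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])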
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: _a = parent _a = batch_size _a = encoder_seq_length _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = d_ff _a = relative_attention_num_buckets _a = dropout_rate _a = initializer_factor _a = eos_token_id _a = pad_token_id _a = decoder_start_token_id _a = None _a = decoder_layers def _UpperCAmelCase ( self ) -> Dict: return TaConfig.from_pretrained('''google/umt5-base''' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: _a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: _a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _UpperCAmelCase ( self ) -> Tuple: _a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = config.num_attention_heads _a = self.prepare_inputs_dict(__UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def _UpperCAmelCase ( self ) -> int: _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCAmelCase ( self ) -> Tuple: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self ) -> List[str]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) _a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) _a = result.last_hidden_state _a = result.past_key_values _a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(__UpperCAmelCase )['''last_hidden_state'''] _a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -1, random_slice_idx].detach() _a 
= output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() _a = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ : str = True A_ : List[str] = False A_ : List[Any] = False A_ : str = True A_ : List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ : Optional[Any] = [0.8, 0.9] def _UpperCAmelCase ( self ) -> Tuple: _a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() _a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _a = self.model_tester.prepare_config_and_inputs() _a = config_and_inputs[0] _a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) _a = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): _a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) _a = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we 
keep hitting edge cases.''' ) def _UpperCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) _a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) _a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off _a = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) _a = model.generate(input_ids.to(__UpperCAmelCase ) ) _a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _a = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
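# Hedged usage sketch of the checkpoint targeted by the integration test above;
# running it downloads google/umt5-small, so it is left as an illustration.
# (UMT5ForConditionalGeneration is the public transformers name for the model
# class referenced above.)
#
#     from transformers import AutoTokenizer, UMT5ForConditionalGeneration
#
#     model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#     tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False)
#     inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
#     print(tokenizer.batch_decode(model.generate(inputs.input_ids)))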
320
1
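# A minimal mechanism sketch (not the UMT5 test above) of what a head mask does:
# attention weights are multiplied by a {0, 1} tensor of shape
# (num_layers, num_heads), so an all-zero mask silences every head — which is
# exactly the `sum(...) == 0.0` check in the test. Names below are illustrative.
import torch

num_layers, num_heads, seq_len = 2, 4, 3
attn_weights = torch.rand(num_layers, num_heads, seq_len, seq_len)
head_mask = torch.zeros(num_layers, num_heads)  # zero out every head
masked = attn_weights * head_mask[:, :, None, None]  # broadcast over positions
assert sum(w.sum().item() for w in masked) == 0.0  # mirrors the test's assertion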
"""simple docstring""" from typing import Any class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Union[str, Any]: _a = data _a = None class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Any: _a = None def _UpperCAmelCase ( self ) -> Dict: _a = self.head while temp is not None: print(temp.data , end=''' ''' ) _a = temp.next print() def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _a = Node(__UpperCAmelCase ) _a = self.head _a = new_node def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if node_data_a == node_data_a: return else: _a = self.head while node_a is not None and node_a.data != node_data_a: _a = node_a.next _a = self.head while node_a is not None and node_a.data != node_data_a: _a = node_a.next if node_a is None or node_a is None: return _a , _a = node_a.data, node_a.data if __name__ == "__main__": __snake_case = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('''After swapping''') ll.print_list()
320
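# Usage sketch for the LinkedList above: swapping with a value that is absent
# is a no-op, because one of the two searches runs off the end and returns None.
if __name__ == "__main__":  # hypothetical quick check
    demo = LinkedList()
    for value in (3, 2, 1):
        demo.push(value)
    demo.swap_nodes(1, 99)  # 99 is not in the list -> nothing happens
    demo.print_list()       # prints: 1 2 3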
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
320
1
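# A compact, readably-named sketch of the cycle check the directed Graph class
# above implements (its original method names were lost to minification).
# Assumed adjacency form: {node: [[weight, neighbour], ...]}, matching the class.
def has_cycle(graph):
    visiting, done = set(), set()

    def dfs(node):
        visiting.add(node)
        for _weight, nxt in graph.get(node, []):
            if nxt in visiting:
                return True  # back edge -> cycle
            if nxt not in done and dfs(nxt):
                return True
        visiting.discard(node)
        done.add(node)
        return False

    return any(node not in done and dfs(node) for node in graph)


assert has_cycle({1: [[1, 2]], 2: [[1, 1]]})  # 1 -> 2 -> 1
assert not has_cycle({1: [[1, 2]], 2: []})    # straight line, no cycle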
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case = HfApi() __snake_case = {} # fmt: off __snake_case = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) __snake_case = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) __snake_case = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) __snake_case = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) __snake_case = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) __snake_case = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) __snake_case = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) __snake_case = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) __snake_case = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) __snake_case = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) __snake_case = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) __snake_case = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 
2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) __snake_case = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) __snake_case = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) __snake_case = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on __snake_case = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
320
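# Minimal sketch of the regression pattern the script above uses: run a seeded
# forward pass and compare the first 30 logits against stored reference values
# with torch.allclose. The "model" here is a stand-in identity map, not a
# diffusers checkpoint.
import torch

torch.manual_seed(0)
noise = torch.randn(1, 30)
weight = torch.eye(30)        # stand-in "model": identity map
logits = noise @ weight
expected = noise.clone()      # reference slice captured on an earlier run
assert torch.allclose(logits[0, :30], expected[0, :30], atol=1e-3)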
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
320
1
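# Sketch of the final @property in the UniSpeech config above: the overall
# downsampling factor of the feature extractor is the product of the conv
# strides, computed with functools.reduce.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the config's default strides
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # at 16 kHz audio, one frame per 320 samples (20 ms)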
"""simple docstring""" import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow __snake_case = logging.getLogger() @unittest.skip('Temporarily disable the doc tests.' ) @require_torch @require_tf @slow class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , ) -> Tuple: _a = [file for file in os.listdir(__UpperCAmelCase ) if os.path.isfile(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) )] if identifier is not None: _a = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for n_ in n_identifier: _a = [file for file in files if n_ not in file] else: _a = [file for file in files if n_identifier not in file] _a = ignore_files or [] ignore_files.append('''__init__.py''' ) _a = [file for file in files if file not in ignore_files] for file in files: # Open all files print('''Testing''' , __UpperCAmelCase ) if only_modules: _a = file.split('''.''' )[0] try: _a = getattr(__UpperCAmelCase , __UpperCAmelCase ) _a = doctest.DocTestSuite(__UpperCAmelCase ) _a = unittest.TextTestRunner().run(__UpperCAmelCase ) self.assertIs(len(result.failures ) , 0 ) except AttributeError: logger.info(F'{module_identifier} is not a module.' ) else: _a = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS ) self.assertIs(result.failed , 0 ) def _UpperCAmelCase ( self ) -> Any: _a = Path('''src/transformers''' ) _a = '''modeling''' _a = [ '''modeling_ctrl.py''', '''modeling_tf_ctrl.py''', ] self.analyze_directory(__UpperCAmelCase , identifier=__UpperCAmelCase , ignore_files=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = Path('''src/transformers''' ) _a = '''tokenization''' self.analyze_directory(__UpperCAmelCase , identifier=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = Path('''src/transformers''' ) _a = '''configuration''' self.analyze_directory(__UpperCAmelCase , identifier=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = Path('''src/transformers''' ) _a = ['''configuration''', '''modeling''', '''tokenization'''] self.analyze_directory(__UpperCAmelCase , n_identifier=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: _a = Path('''docs/source''' ) _a = ['''favicon.ico'''] self.analyze_directory(__UpperCAmelCase , ignore_files=__UpperCAmelCase , only_modules=__UpperCAmelCase )
320
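# Minimal sketch of the doctest machinery the suite above drives for each file:
# collect a module's doctests into a unittest suite and assert zero failures.
import doctest
import unittest


def double(x):
    """
    >>> double(2)
    4
    """
    return 2 * x


suite = doctest.DocTestSuite()  # collects doctests from the calling module
result = unittest.TextTestRunner(verbosity=0).run(suite)
assert len(result.failures) == 0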
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } __snake_case = { '''google/rembert''': 256, } __snake_case = '''▁''' class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : List[Any] = RemBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = False if not self.vocab_file else True def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) ) return _a = os.path.join( 
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
320
1
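# Sketch of the sentence-pair layout the RemBERT tokenizer methods above
# produce, with hypothetical token ids: [CLS] A [SEP] -> all zeros,
# [CLS] A [SEP] B [SEP] -> zeros for the first segment, ones for the second.
def create_token_type_ids(ids_0, ids_1=None, cls_id=0, sep_id=1):
    cls, sep = [cls_id], [sep_id]
    if ids_1 is None:
        return len(cls + ids_0 + sep) * [0]
    return len(cls + ids_0 + sep) * [0] + len(ids_1 + sep) * [1]


assert create_token_type_ids([7, 8]) == [0, 0, 0, 0]
assert create_token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]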
"""simple docstring""" import os def A_ ( ): """simple docstring""" with open(os.path.dirname(_lowerCAmelCase ) + '''/p022_names.txt''' ) as file: _a = str(file.readlines()[0] ) _a = names.replace('''"''', '''''' ).split(''',''' ) names.sort() _a = 0 _a = 0 for i, name in enumerate(_lowerCAmelCase ): for letter in name: name_score += ord(_lowerCAmelCase ) - 64 total_score += (i + 1) * name_score _a = 0 return total_score if __name__ == "__main__": print(solution())
320
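# Worked check of the scoring rule above, using the example from the original
# Project Euler problem 22 statement: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53,
# and at position 938 in the sorted list it scores 938 * 53 = 49714.
name_score = sum(ord(letter) - 64 for letter in "COLIN")
assert name_score == 53
assert 938 * name_score == 49714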
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
320
1
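# A generic sketch of the lazy-import trick that `_LazyModule` above builds on:
# resolve a submodule with importlib only when an attribute is first touched
# (PEP 562 style). This is an illustration, not transformers' implementation.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(submodule)
                return getattr(module, attr)
        raise AttributeError(attr)


lazy = LazyModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json imported only on first access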
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=2 , ) -> Optional[int]: _a = parent _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = scope _a = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _a = (image_size // patch_size) ** 2 _a = num_patches + 2 def _UpperCAmelCase ( self ) -> List[Any]: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Any: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _a = DeiTModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _a = DeiTForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model(__UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , 
(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _a = 1 _a = DeiTForMaskedImageModeling(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _a = self.type_sequence_label_size _a = DeiTForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _a = 1 _a = DeiTForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _UpperCAmelCase ( self ) -> Dict: _a = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCamelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Any = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) A_ : Union[str, Any] = ( { 'feature-extraction': DeiTModel, 'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) A_ : Tuple = False A_ : List[Any] = False A_ : Any = False def _UpperCAmelCase ( self ) -> str: _a = DeiTModelTester(self ) _a = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _UpperCAmelCase ( self ) -> List[str]: self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> List[Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase=False ) -> Dict: _a = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _UpperCAmelCase ( self ) -> int: if not self.model_tester.is_training: return _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__UpperCAmelCase ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue _a = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _a = model(**__UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> Union[str, Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _a = False _a = True for model_class in self.all_model_classes: if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue _a = model_class(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(__UpperCAmelCase ) model.train() _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _a = model(**__UpperCAmelCase ).loss loss.backward() def _UpperCAmelCase ( self ) -> Union[str, Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__UpperCAmelCase ), *get_values(__UpperCAmelCase ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ): _a = problem_type['''title'''] _a = problem_type['''num_labels'''] _a = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if problem_type["num_labels"] > 1: _a = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) _a = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list: _a = model(**__UpperCAmelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( F'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def _UpperCAmelCase ( self ) -> str: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = DeiTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def A_ ( ): """simple docstring""" _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Tuple: _a = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to( __UpperCAmelCase ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _a = model(**__UpperCAmelCase ) # verify the logits _a = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _a = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _UpperCAmelCase ( self ) -> List[str]: _a = DeiTModel.from_pretrained( '''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ) _a = inputs.pixel_values.to(__UpperCAmelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): _a = model(__UpperCAmelCase )
320
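# Small check of the comment in the DeiT tester above: the sequence length is
# the patch count plus 2, for the [CLS] and distillation tokens (tester
# defaults: image_size=30, patch_size=2).
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 2
assert (num_patches, seq_length) == (225, 227)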
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __lowerCamelCase ( a__ ): '''simple docstring''' @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _a = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(__UpperCAmelCase ) BertModel.from_pretrained(__UpperCAmelCase ) BertTokenizer.from_pretrained(__UpperCAmelCase ) pipeline(task='''fill-mask''' , model=__UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Optional[Any]: # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a = ''' from transformers 
import BertConfig, BertModel, BertTokenizer ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def _UpperCAmelCase ( self ) -> Tuple: _a = ''' from transformers import pipeline ''' _a = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' _a = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' _a = self.get_env() _a = '''1''' _a = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: _a = ''' from transformers import AutoModel ''' _a = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network _a = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _a = self.get_env() _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a = '''1''' _a = subprocess.run(__UpperCAmelCase , env=__UpperCAmelCase , check=__UpperCAmelCase , capture_output=__UpperCAmelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
320
1
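# Standalone sketch of the network-kill trick the offline tests above inject
# into their subprocesses: replace socket.socket so that any connection attempt
# raises immediately instead of reaching the internet.
import socket
import urllib.request


def offline_socket(*args, **kwargs):
    raise RuntimeError("Offline mode is enabled, we shouldn't access internet")


socket.socket = offline_socket
try:
    urllib.request.urlopen("https://huggingface.co", timeout=1)
except Exception as exc:  # the RuntimeError surfaces through urllib
    print("blocked as expected:", exc)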
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=0.6 , __UpperCAmelCase=None , ) -> List[str]: _a = parent _a = batch_size _a = image_size _a = patch_size _a = num_channels _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = mask_ratio _a = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _a = (image_size // patch_size) ** 2 _a = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _UpperCAmelCase ( self ) -> int: _a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Optional[int]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = TFViTMAEModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = TFViTMAEForPreTraining(__UpperCAmelCase ) _a = model(__UpperCAmelCase , training=__UpperCAmelCase ) # expected sequence length = num_patches _a = (self.image_size // self.patch_size) ** 2 _a = 
self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _a = 1 _a = TFViTMAEForPreTraining(__UpperCAmelCase ) _a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _a = model(__UpperCAmelCase , training=__UpperCAmelCase ) _a = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.prepare_config_and_inputs() ((_a) , (_a) , (_a)) = config_and_inputs _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __lowerCamelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () A_ : List[Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} A_ : Optional[int] = False A_ : Dict = False A_ : Optional[Any] = False A_ : Any = False def _UpperCAmelCase ( self ) -> List[Any]: _a = TFViTMAEModelTester(self ) _a = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def _UpperCAmelCase ( self ) -> str: pass def _UpperCAmelCase ( self ) -> Optional[Any]: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , tf.keras.layers.Layer ) ) def _UpperCAmelCase ( self ) -> str: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> str: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: # make the mask reproducible np.random.seed(2 ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = int((config.image_size // config.patch_size) ** 2 ) _a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _a = model(__UpperCAmelCase , noise=__UpperCAmelCase ) _a = copy.deepcopy(self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _a = model(**__UpperCAmelCase , noise=__UpperCAmelCase ) _a = outputs_dict[0].numpy() _a = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def _UpperCAmelCase ( self ) -> Optional[int]: # make the mask reproducible np.random.seed(2 ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = int((config.image_size // config.patch_size) ** 2 ) _a = 
np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__UpperCAmelCase ): _a = {} for k, v in inputs_dict.items(): if tf.is_tensor(__UpperCAmelCase ): _a = v.numpy() else: _a = np.array(__UpperCAmelCase ) return inputs_np_dict for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _a = prepare_numpy_arrays(__UpperCAmelCase ) _a = model(__UpperCAmelCase , noise=__UpperCAmelCase ) _a = model(**__UpperCAmelCase , noise=__UpperCAmelCase ) self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: # make masks reproducible np.random.seed(2 ) _a = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) _a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _a = tf.constant(__UpperCAmelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _a = tf_noise super().check_pt_tf_models(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[str]: # make mask reproducible np.random.seed(2 ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__UpperCAmelCase ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(__UpperCAmelCase , __UpperCAmelCase ),) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__UpperCAmelCase , '''_keras_serializable''' , __UpperCAmelCase ) } _a = int((config.image_size // config.patch_size) ** 2 ) _a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _a = tf.convert_to_tensor(__UpperCAmelCase ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: _a = main_layer_class(__UpperCAmelCase ) _a = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } _a = tf.keras.Model(__UpperCAmelCase , outputs=main_layer(__UpperCAmelCase ) ) _a = model(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: _a = os.path.join(__UpperCAmelCase , '''keras_model.h5''' ) model.save(__UpperCAmelCase ) _a = tf.keras.models.load_model( __UpperCAmelCase , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__UpperCAmelCase , tf.keras.Model ) _a = model(__UpperCAmelCase ) self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: # make mask reproducible np.random.seed(2 ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = int((config.image_size // config.patch_size) ** 2 ) _a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _a = model(__UpperCAmelCase , noise=__UpperCAmelCase ) if model_class.__name__ == "TFViTMAEModel": _a = outputs.last_hidden_state.numpy() _a = 0 else: _a = outputs.logits.numpy() _a 
= 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase , saved_model=__UpperCAmelCase ) _a = model_class.from_pretrained(__UpperCAmelCase ) _a = model(__UpperCAmelCase , noise=__UpperCAmelCase ) if model_class.__name__ == "TFViTMAEModel": _a = after_outputs['''last_hidden_state'''].numpy() _a = 0 else: _a = after_outputs['''logits'''].numpy() _a = 0 _a = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCAmelCase , 1e-5 ) def _UpperCAmelCase ( self ) -> Optional[Any]: # make mask reproducible np.random.seed(2 ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = int((config.image_size // config.patch_size) ** 2 ) _a = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _a = model_class(__UpperCAmelCase ) _a = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) _a = model(__UpperCAmelCase , noise=__UpperCAmelCase ) _a = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(__UpperCAmelCase ) _a = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config _a = model_class.from_config(model.config ) _a = new_model(__UpperCAmelCase ) # Build model new_model.set_weights(model.get_weights() ) _a = new_model(__UpperCAmelCase , noise=__UpperCAmelCase ) self.assert_outputs_same(__UpperCAmelCase , __UpperCAmelCase ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def _UpperCAmelCase ( self ) -> int: pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def _UpperCAmelCase ( self ) -> Tuple: pass @slow def _UpperCAmelCase ( self ) -> Optional[int]: _a = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__UpperCAmelCase ) def A_ ( ): """simple docstring""" _a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> str: # make random mask reproducible across the PT and TF model np.random.seed(2 ) _a = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) _a = self.default_image_processor _a = prepare_img() _a = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _a = ViTMAEConfig() _a = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _a = np.random.uniform(size=(1, num_patches) ) # forward pass _a = model(**__UpperCAmelCase , noise=__UpperCAmelCase ) # verify the logits _a = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _a = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
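# Illustrative sketch (assumptions, not part of the test class above): every
# test passes an explicit `noise` array because ViTMAE derives its random
# patch mask from that noise, so fixing the noise makes two forward passes
# reproducible. Shapes follow the test constants used above.
#
#     import numpy as np
#     from transformers import TFViTMAEForPreTraining, ViTMAEConfig
#
#     config = ViTMAEConfig()
#     num_patches = int((config.image_size // config.patch_size) ** 2)
#     noise = np.random.uniform(size=(1, num_patches))
#     model = TFViTMAEForPreTraining(config)
#     pixels = np.random.uniform(size=(1, 3, config.image_size, config.image_size)).astype("float32")
#     out_a = model(pixels, noise=noise)
#     out_b = model(pixels, noise=noise)
#     assert np.allclose(out_a.logits.numpy(), out_b.logits.numpy())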
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
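# Hedged sketch of how these placeholders behave: importing the package still
# succeeds without flax installed, but constructing any flax-backed object
# fails loudly via `requires_backends`.
#
#     try:
#         obj = SomeFlaxOnlyClass()      # any of the dummy classes above
#     except ImportError as err:
#         print(err)                     # message tells the user to install flax
#
# `SomeFlaxOnlyClass` is a stand-in name; the real dummies are re-exported
# under their usual class names so user code fails only at the point of use.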
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : str ): """simple docstring""" _a = [1] for i in range(2, _lowerCAmelCase ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" _a = [] _a = list(range(_lowerCAmelCase ) ) # Find permutation while factorials: _a = factorials.pop() _a , _a = divmod(_lowerCAmelCase, _lowerCAmelCase ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __snake_case = ''' @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __snake_case = '''\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. ''' __snake_case = ''' Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=["About 95 species are currently accepted ."] >>> predictions=["About 95 you now get in ."] >>> references=[["About 95 species are currently known ."]] >>> wiki_split = datasets.load_metric("wiki_split") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0} ''' def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" def remove_articles(_lowerCAmelCase : Optional[int] ): _a = re.compile(R'''\b(a|an|the)\b''', re.UNICODE ) return re.sub(_lowerCAmelCase, ''' ''', _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase : Tuple ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase : Tuple ): _a = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase : List[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Optional[Any] ): """simple docstring""" return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Any ): """simple docstring""" _a = [any(compute_exact(_lowerCAmelCase, _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_00 def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[Any], _lowerCAmelCase : str, _lowerCAmelCase : str ): """simple docstring""" _a = [rgram for rgrams in rgramslist for rgram in rgrams] _a = Counter(_lowerCAmelCase ) _a = Counter(_lowerCAmelCase ) _a = Counter() for sgram, scount in sgramcounter.items(): _a = scount * numref _a = Counter(_lowerCAmelCase ) _a = Counter() for cgram, ccount in 
cgramcounter.items(): _a = ccount * numref # KEEP _a = sgramcounter_rep & cgramcounter_rep _a = keepgramcounter_rep & rgramcounter _a = sgramcounter_rep & rgramcounter _a = 0 _a = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) _a = keeptmpscorea / sum(keepgramcounterall_rep.values() ) _a = 0 if keepscore_precision > 0 or keepscore_recall > 0: _a = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION _a = sgramcounter_rep - cgramcounter_rep _a = delgramcounter_rep - rgramcounter _a = sgramcounter_rep - rgramcounter _a = 0 _a = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. _a = 1 if len(_lowerCAmelCase ) > 0: _a = deltmpscorea / len(_lowerCAmelCase ) # ADDITION _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) _a = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) _a = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
_a = 1 _a = 1 if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: _a = addtmpscore / len(_lowerCAmelCase ) _a = 0 if addscore_precision > 0 or addscore_recall > 0: _a = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Any ): """simple docstring""" _a = len(_lowerCAmelCase ) _a = ssent.split(''' ''' ) _a = csent.split(''' ''' ) _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] _a = [] for rsent in rsents: _a = rsent.split(''' ''' ) _a = [] _a = [] _a = [] ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0, len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: _a = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: _a = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) ((_a) , (_a) , (_a)) = SARIngram(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) _a = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 _a = sum([delascore, delascore, delascore, delascore] ) / 4 _a = sum([addascore, addascore, addascore, addascore] ) / 4 _a = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : bool = True, _lowerCAmelCase : str = "13a", _lowerCAmelCase : bool = True ): """simple docstring""" if lowercase: _a = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: _a = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: _a = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": _a = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase, escape=_lowerCAmelCase ) elif tokenizer == "penn": _a = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase, return_str=_lowerCAmelCase ) else: _a 
= sentence if not return_str: _a = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[Any] ): """simple docstring""" if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) _a = 0 for src, pred, refs in zip(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ), normalize(_lowerCAmelCase ), [normalize(_lowerCAmelCase ) for sent in refs] ) _a = sari_score / len(_lowerCAmelCase ) return 1_00 * sari_score def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : Tuple, _lowerCAmelCase : Any="exp", _lowerCAmelCase : Tuple=None, _lowerCAmelCase : Union[str, Any]=False, _lowerCAmelCase : Optional[Any]=False, _lowerCAmelCase : List[str]=False, ): """simple docstring""" _a = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) _a = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] _a = sacrebleu.corpus_bleu( _lowerCAmelCase, _lowerCAmelCase, smooth_method=_lowerCAmelCase, smooth_value=_lowerCAmelCase, force=_lowerCAmelCase, lowercase=_lowerCAmelCase, use_effective_order=_lowerCAmelCase, ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCamelCase ( datasets.Metric ): '''simple docstring''' def _UpperCAmelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _a = {} result.update({'''sari''': compute_sari(sources=__UpperCAmelCase , predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) result.update({'''exact''': compute_em(predictions=__UpperCAmelCase , references=__UpperCAmelCase )} ) return result
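# Hedged usage sketch of the combined metric, mirroring the docstring example
# above (the expected numbers come from the metric card, not re-verified here):
#
#     import datasets
#     wiki_split = datasets.load_metric("wiki_split")
#     results = wiki_split.compute(
#         sources=["About 95 species are currently accepted ."],
#         predictions=["About 95 you now get in ."],
#         references=[["About 95 species are currently known ."]],
#     )
#     # results -> {"sari": ..., "sacrebleu": ..., "exact": ...}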
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float ): """simple docstring""" if mass < 0: raise ValueError('''The mass of a body cannot be negative''' ) return 0.5 * mass * abs(_lowerCAmelCase ) * abs(_lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
320
"""simple docstring""" def A_ ( _lowerCAmelCase : int = 50 ): """simple docstring""" _a = [1] * (length + 1) for row_length in range(3, length + 1 ): for block_length in range(3, row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
320
1
"""simple docstring""" def A_ ( _lowerCAmelCase : int ): """simple docstring""" if isinstance(_lowerCAmelCase, _lowerCAmelCase ): raise TypeError('''\'float\' object cannot be interpreted as an integer''' ) if isinstance(_lowerCAmelCase, _lowerCAmelCase ): raise TypeError('''\'str\' object cannot be interpreted as an integer''' ) if num == 0: return "0b0" _a = False if num < 0: _a = True _a = -num _a = [] while num > 0: binary.insert(0, num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary ) return "0b" + "".join(str(_lowerCAmelCase ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __snake_case = logging.get_logger('''transformers.models.speecht5''') __snake_case = { '''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''', '''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''', '''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''', '''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''', } __snake_case = { '''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''', '''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''', } __snake_case = { '''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''', '''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''', '''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''', '''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''', '''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''', } __snake_case = { '''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''', '''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''', '''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''', '''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''', '''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''', '''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''', '''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''', '''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''', '''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''', '''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''', '''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''', '''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''', } __snake_case = { '''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''', } __snake_case = { '''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''', } __snake_case = { '''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''', '''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''', '''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''', '''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''', '''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''', '''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''', 
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''', '''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''', '''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''', } __snake_case = { '''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''', '''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''', '''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''', '''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''', '''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''', '''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''', '''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''', '''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''', '''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''', '''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''', '''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''', '''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''', '''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''', } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __snake_case = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __snake_case = [] __snake_case = [ '''encoder.version''', '''encoder.layers.*.norm_k.weight''', '''encoder.layers.*.norm_k.bias''', '''decoder.version''', '''decoder.layers.*.norm_k.weight''', '''decoder.layers.*.norm_k.bias''', '''decoder.pos_emb.pe_k''', '''speech_encoder_prenet.embed_positions._float_tensor''', '''text_decoder_prenet.embed_positions._float_tensor''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''speech_decoder_prenet.*''', '''speech_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''speech_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] __snake_case = IGNORE_KEYS + [ '''encoder.proj''', '''text_encoder_prenet.*''', '''text_decoder_prenet.*''', '''text_decoder_postnet.*''', ] def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple, _lowerCAmelCase : Dict, _lowerCAmelCase : Optional[int] ): """simple docstring""" for attribute in key.split('''.''' ): _a = getattr(_lowerCAmelCase, _lowerCAmelCase ) if weight_type is not None: _a = getattr(_lowerCAmelCase, _lowerCAmelCase ).shape else: _a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'Shape of hf {key + 
"." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": _a = value elif weight_type == "weight_g": _a = value elif weight_type == "weight_v": _a = value elif weight_type == "bias": _a = value elif weight_type == "running_mean": _a = value elif weight_type == "running_var": _a = value elif weight_type == "num_batches_tracked": _a = value else: _a = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple ): """simple docstring""" for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : int ): """simple docstring""" _a = [] if task == "s2t": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2T _a = IGNORE_KEYS_S2T elif task == "t2s": _a = None _a = MAPPING_T2S _a = IGNORE_KEYS_T2S elif task == "s2s": _a = hf_model.speechta.encoder.prenet.feature_encoder _a = MAPPING_S2S _a = IGNORE_KEYS_S2S else: raise ValueError(f'Unsupported task: {task}' ) for name, value in fairseq_dict.items(): if should_ignore(_lowerCAmelCase, _lowerCAmelCase ): logger.info(f'{name} was ignored' ) continue _a = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) _a = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: _a , _a = key.split('''.*.''' ) if prefix in name and suffix in name: _a = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: _a = True if "*" in mapped_key: _a = name.split(_lowerCAmelCase )[0].split('''.''' )[-2] _a = mapped_key.replace('''*''', _lowerCAmelCase ) if "weight_g" in name: _a = '''weight_g''' elif "weight_v" in name: _a = '''weight_v''' elif "bias" in name: _a = '''bias''' elif "weight" in name: _a = '''weight''' elif "running_mean" in name: _a = '''running_mean''' elif "running_var" in name: _a = '''running_var''' elif "num_batches_tracked" in name: _a = '''num_batches_tracked''' else: _a = None set_recursively(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) def A_ ( _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any] ): """simple docstring""" _a = full_name.split('''conv_layers.''' )[-1] _a = name.split('''.''' ) _a = int(items[0] ) _a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) _a = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Dict, _lowerCAmelCase : List[Any]=None, _lowerCAmelCase : List[str]=None, _lowerCAmelCase : int=None, ): """simple docstring""" if config_path is not None: _a = SpeechTaConfig.from_pretrained(_lowerCAmelCase ) else: _a = SpeechTaConfig() if task == "s2t": _a = config.max_text_positions _a = SpeechTaForSpeechToText(_lowerCAmelCase ) elif task == "t2s": _a = 18_76 _a = 6_00 _a = config.max_speech_positions _a = SpeechTaForTextToSpeech(_lowerCAmelCase ) elif task == "s2s": _a = 18_76 _a = config.max_speech_positions _a = SpeechTaForSpeechToSpeech(_lowerCAmelCase ) else: raise ValueError(f'Unknown task name: {task}' ) if vocab_path: _a = SpeechTaTokenizer(_lowerCAmelCase, model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it _a = AddedToken('''<mask>''', lstrip=_lowerCAmelCase, rstrip=_lowerCAmelCase ) _a = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) _a = SpeechTaFeatureExtractor() _a = SpeechTaProcessor(tokenizer=_lowerCAmelCase, feature_extractor=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) _a = torch.load(_lowerCAmelCase ) recursively_load_weights(fairseq_checkpoint['''model'''], _lowerCAmelCase, _lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) if repo_id: print('''Pushing to the hub...''' ) processor.push_to_hub(_lowerCAmelCase ) model.push_to_hub(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--task''', default='''s2t''', type=str, help='''Type of the SpeechT5 model you\'d like to convert. 
Should be one of \'s2t\', \'t2s\', \'s2s\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __snake_case = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
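# Hedged invocation sketch (script name and all paths are placeholders,
# not real files):
#
#     python convert_speecht5_checkpoint.py \
#         --task t2s \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --vocab_path /path/to/spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts_converted
#
# The flags mirror the argparse definitions above; passing `--push_to_hub`
# with a repo id additionally uploads the converted processor and model.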
320
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[int] = 'roberta' def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> str: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = position_embedding_type _a = use_cache _a = classifier_dropout class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
320
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''edbeeching/decision-transformer-gym-hopper-medium''': ( '''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json''' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'decision_transformer' A_ : Union[str, Any] = ['past_key_values'] A_ : str = { 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]: _a = state_dim _a = act_dim _a = hidden_size _a = max_ep_len _a = action_tanh _a = vocab_size _a = n_positions _a = n_layer _a = n_head _a = n_inner _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = scale_attn_weights _a = use_cache _a = scale_attn_by_inverse_layer_idx _a = reorder_and_upcast_attn _a = bos_token_id _a = eos_token_id super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
320
1
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __snake_case = { # 1536-bit 5: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 2048-bit 14: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 3072-bit 15: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 4096-bit 16: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + 
'''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199''' + '''FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 6144-bit 17: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08''' + '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B''' + '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9''' + '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6''' + '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8''' + '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C''' + '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718''' + '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D''' + '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D''' + '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226''' + '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC''' + '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26''' + '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB''' + '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2''' + '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127''' + '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406''' + '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918''' + '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151''' + '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03''' + '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F''' + '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B''' + '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632''' + '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E''' + '''6DCC4024FFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, # 8192-bit 18: { '''prime''': int( '''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1''' + '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD''' + '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245''' + '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED''' + '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D''' + '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F''' + '''83655D23DCA3AD961C62F356208552BB9ED529077096966D''' + '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B''' + '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9''' + '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510''' + '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64''' + '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7''' + '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B''' + '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C''' + '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31''' + '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7''' + '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA''' + '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6''' + '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED''' + '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9''' + '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492''' + '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD''' + '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831''' + '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B''' + '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF''' + 
'''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6''' + '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3''' + '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA''' + '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328''' + '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C''' + '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE''' + '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4''' + '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300''' + '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568''' + '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9''' + '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B''' + '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A''' + '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36''' + '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1''' + '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92''' + '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47''' + '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71''' + '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''', base=16, ), '''generator''': 2, }, } class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase = 14 ) -> None: if group not in primes: raise ValueError('''Unsupported Group''' ) _a = primes[group]['''prime'''] _a = primes[group]['''generator'''] _a = int(hexlify(urandom(32 ) ) , base=16 ) def _UpperCAmelCase ( self ) -> str: return hex(self.__private_key )[2:] def _UpperCAmelCase ( self ) -> str: _a = pow(self.generator , self.__private_key , self.prime ) return hex(__UpperCAmelCase )[2:] def _UpperCAmelCase ( self , __UpperCAmelCase ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__UpperCAmelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str: _a = int(__UpperCAmelCase , base=16 ) if not self.is_valid_public_key(__UpperCAmelCase ): raise ValueError('''Invalid public key''' ) _a = pow(__UpperCAmelCase , self.__private_key , self.prime ) return shaaaa(str(__UpperCAmelCase ).encode() ).hexdigest() @staticmethod def _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ) -> bool: # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__UpperCAmelCase , (prime - 1) // 2 , __UpperCAmelCase ) == 1 ) @staticmethod def _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 14 ) -> str: _a = int(__UpperCAmelCase , base=16 ) _a = int(__UpperCAmelCase , base=16 ) _a = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(__UpperCAmelCase , __UpperCAmelCase ): raise ValueError('''Invalid public key''' ) _a = pow(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return shaaaa(str(__UpperCAmelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
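# Self-contained sketch of the exchange the class above implements, using only
# names already defined in this file (group 14 assumed). Both parties derive
# the same secret without ever transmitting their private keys.
def _demo_key_exchange() -> None:
    prime = primes[14]['''prime''']
    generator = primes[14]['''generator''']
    private_a = int(hexlify(urandom(32 ) ) , base=16 )  # Alice's secret
    private_b = int(hexlify(urandom(32 ) ) , base=16 )  # Bob's secret
    public_a = pow(generator , private_a , prime )  # exchanged in the clear
    public_b = pow(generator , private_b , prime )
    assert pow(public_b , private_a , prime ) == pow(public_a , private_b , prime )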
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __snake_case = logging.get_logger(__name__) if is_vision_available(): import PIL class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''shortest_edge''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) _a = do_resize _a = size _a = resample _a = do_center_crop _a = crop_size _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _a = image_std if image_std is not None else OPENAI_CLIP_STD _a = do_convert_rgb def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _a = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _a = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _a = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
_a = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: _a = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _a = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
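# Hedged usage sketch (added, standalone; not part of the module above): the class
# above follows the transformers CLIP-style image-processor API, so the equivalent
# public class can be driven as below. The random image is a stand-in for a real photo.
import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

clip_processor = CLIPImageProcessor()  # defaults: resize shortest edge to 224, center-crop 224x224
photo = Image.fromarray(np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8))
clip_batch = clip_processor(images=photo, return_tensors="np")
print(clip_batch["pixel_values"].shape)  # (1, 3, 224, 224)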
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig __snake_case = { '''facebook/maskformer-swin-base-ade''': ( '''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json''' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Tuple = 'maskformer' A_ : str = {'hidden_size': 'mask_feature_size'} A_ : Optional[Any] = ['resnet', 'swin'] A_ : int = ['detr'] def __init__( self , __UpperCAmelCase = 256 , __UpperCAmelCase = 256 , __UpperCAmelCase = 0.1 , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0.02 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = 20.0 , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[int]: if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k _a = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = backbone_config.pop('''model_type''' ) _a = CONFIG_MAPPING[backbone_model_type] _a = config_class.from_dict(__UpperCAmelCase ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. ' F'Supported model types: {",".join(self.backbones_supported )}' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 _a = DetrConfig() else: # verify that the decoder is supported _a = ( decoder_config.pop('''model_type''' ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( F'Transformer Decoder {decoder_type} not supported, please use one of' F' {",".join(self.decoders_supported )}' ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _a = CONFIG_MAPPING[decoder_type] _a = config_class.from_dict(__UpperCAmelCase ) _a = backbone_config _a = decoder_config # main feature dimension for the model _a = fpn_feature_size _a = mask_feature_size # initializer _a = init_std _a = init_xavier_std # Hungarian matcher && loss _a = cross_entropy_weight _a = dice_weight _a = mask_weight _a = use_auxiliary_loss _a = no_object_weight _a = output_auxiliary_logits _a = self.decoder_config.encoder_attention_heads _a = self.decoder_config.num_hidden_layers super().__init__(**__UpperCAmelCase ) @classmethod def _UpperCAmelCase ( cls , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: return cls( backbone_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , **__UpperCAmelCase , ) def _UpperCAmelCase ( self ) -> Dict[str, any]: _a = copy.deepcopy(self.__dict__ ) _a = self.backbone_config.to_dict() _a = self.decoder_config.to_dict() _a = self.__class__.model_type return output
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __snake_case = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import json import subprocess def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : Tuple ): """simple docstring""" _a = [] _a = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' ''' https://api.github.com/repos/huggingface/transformers/actions/runners''' ) _a = subprocess.run(_lowerCAmelCase, shell=_lowerCAmelCase, stdout=subprocess.PIPE ) _a = output.stdout.decode('''utf-8''' ) _a = json.loads(_lowerCAmelCase ) _a = status['''runners'''] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_lowerCAmelCase ) # save the result so we can report them on Slack with open('''offline_runners.txt''', '''w''' ) as fp: fp.write(json.dumps(_lowerCAmelCase ) ) if len(_lowerCAmelCase ) > 0: _a = '''\n'''.join([x['''name'''] for x in offline_runners] ) raise ValueError(f'The following runners are offline:\n{failed}' ) if __name__ == "__main__": def A_ ( _lowerCAmelCase : Optional[int] ): """simple docstring""" return values.split(''',''' ) __snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--target_runners''', default=None, type=list_str, required=True, help='''Comma-separated list of runners to check status.''', ) parser.add_argument( '''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.''' ) __snake_case = parser.parse_args() get_runner_status(args.target_runners, args.token)
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def A_ ( ): """simple docstring""" _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, bootstrap_aggregation=_lowerCAmelCase, rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def A_ ( ): """simple docstring""" _a = '''rougeLsum''' _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=[k] )[k] assert score > score_no_sep def A_ ( ): """simple docstring""" _a = ['''rouge1''', '''rouge2''', '''rougeL'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase, rouge_keys=_lowerCAmelCase ) assert score_sep == score_no_sep def A_ ( ): """simple docstring""" _a = [ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] _a = [ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) == calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, newline_sep=_lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = [ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] _a = [ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''], newline_sep=_lowerCAmelCase )['''rougeLsum'''] _a = calculate_rouge(_lowerCAmelCase, _lowerCAmelCase, rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def A_ ( ): """simple docstring""" _a = Path('''examples/seq2seq/test_data/wmt_en_ro''' ) _a = calculate_rouge_path(data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ) ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase ) _a = calculate_rouge_path( data_dir.joinpath('''test.source''' ), data_dir.joinpath('''test.target''' ), bootstrap_aggregation=_lowerCAmelCase ) assert isinstance(_lowerCAmelCase, _lowerCAmelCase )
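# Hedged note (added): the functions above are pytest-style checks; assuming the
# file sits next to the utils.py and rouge_cli.py it imports from the seq2seq
# examples, they run with:
#
#   python -m pytest test_calculate_rouge.py -q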
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'M-CLIP' def __init__( self , __UpperCAmelCase=1024 , __UpperCAmelCase=768 , **__UpperCAmelCase ) -> List[str]: _a = transformerDimSize _a = imageDimSize super().__init__(**__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : int = MCLIPConfig def __init__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: super().__init__(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) _a = XLMRobertaModel(__UpperCAmelCase ) _a = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _a = self.transformer(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0] _a = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None] return self.LinearTransformation(__UpperCAmelCase ), embs
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __snake_case = logging.get_logger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[int] = 'roformer' def __init__( self , __UpperCAmelCase=50000 , __UpperCAmelCase=None , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1536 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-1_2 , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Dict: super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _a = vocab_size _a = hidden_size if embedding_size is None else embedding_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = hidden_act _a = intermediate_size _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = max_position_embeddings _a = type_vocab_size _a = initializer_range _a = layer_norm_eps _a = rotary_value _a = use_cache class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} _a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
"""simple docstring""" from __future__ import annotations def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, ): """simple docstring""" if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif stress < 0: raise ValueError('''Stress cannot be negative''' ) elif tangential_force < 0: raise ValueError('''Tangential Force cannot be negative''' ) elif area < 0: raise ValueError('''Area cannot be negative''' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __snake_case = 16 __snake_case = 32 def A_ ( _lowerCAmelCase : Accelerator, _lowerCAmelCase : int = 16 ): """simple docstring""" _a = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _a = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(_lowerCAmelCase : str ): # max_length=None => use the model max length (it's actually the default) _a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=_lowerCAmelCase, max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _a = datasets.map( _lowerCAmelCase, batched=_lowerCAmelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _a = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(_lowerCAmelCase : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. _a = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _a = 16 elif accelerator.mixed_precision != "no": _a = 8 else: _a = None return tokenizer.pad( _lowerCAmelCase, padding='''longest''', max_length=_lowerCAmelCase, pad_to_multiple_of=_lowerCAmelCase, return_tensors='''pt''', ) # Instantiate dataloaders. 
_a = DataLoader( tokenized_datasets['''train'''], shuffle=_lowerCAmelCase, collate_fn=_lowerCAmelCase, batch_size=_lowerCAmelCase, drop_last=_lowerCAmelCase ) _a = DataLoader( tokenized_datasets['''validation'''], shuffle=_lowerCAmelCase, collate_fn=_lowerCAmelCase, batch_size=_lowerCAmelCase, drop_last=(accelerator.mixed_precision == '''fp8'''), ) return train_dataloader, eval_dataloader def A_ ( _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Tuple ): """simple docstring""" _a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _a = config['''lr'''] _a = int(config['''num_epochs'''] ) _a = int(config['''seed'''] ) _a = int(config['''batch_size'''] ) _a = evaluate.load('''glue''', '''mrpc''' ) # If the batch size is too big we use gradient accumulation _a = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _a = batch_size // MAX_GPU_BATCH_SIZE _a = MAX_GPU_BATCH_SIZE set_seed(_lowerCAmelCase ) _a , _a = get_dataloaders(_lowerCAmelCase, _lowerCAmelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _a = model.to(accelerator.device ) # Instantiate optimizer _a = AdamW(params=model.parameters(), lr=_lowerCAmelCase ) # Instantiate scheduler _a = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase, num_warmup_steps=1_00, num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _a , _a , _a , _a , _a = accelerator.prepare( _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _a = model(**_lowerCAmelCase ) _a = outputs.loss _a = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _a = model(**_lowerCAmelCase ) _a = outputs.logits.argmax(dim=-1 ) _a , _a = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_lowerCAmelCase, references=_lowerCAmelCase, ) _a = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f'epoch {epoch}:', _lowerCAmelCase ) def A_ ( ): """simple docstring""" _a = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=_lowerCAmelCase, default=_lowerCAmelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) _a = parser.parse_args() _a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_lowerCAmelCase, _lowerCAmelCase ) if __name__ == "__main__": main()
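# Hedged usage sketch (added): the training function above is meant to be
# launched through the `accelerate` CLI; the script name is assumed for
# illustration.
#
#   accelerate config                                   # one-time interactive setup
#   accelerate launch nlp_example.py --mixed_precision fp16
#   python nlp_example.py --cpu                         # plain single-process CPU run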
"""simple docstring""" def A_ ( ): """simple docstring""" _a = [] _a = 1 while len(_lowerCAmelCase ) < 1e6: constant.append(str(_lowerCAmelCase ) ) i += 1 _a = ''''''.join(_lowerCAmelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[9_99] ) * int(constant[99_99] ) * int(constant[9_99_99] ) * int(constant[99_99_99] ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __snake_case = logging.get_logger(__name__) def A_ ( _lowerCAmelCase : str ): """simple docstring""" if isinstance(_lowerCAmelCase, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(_lowerCAmelCase, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(_lowerCAmelCase ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[int] = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _a = size if size is not None else {'''shortest_edge''': 224} _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' ) _a = do_resize _a = size _a = do_center_crop _a = crop_size _a = resample _a = do_rescale _a = rescale_factor _a = do_normalize _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: _a = get_resize_output_image_size(__UpperCAmelCase , size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: _a = (size['''height'''], size['''width''']) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _a = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}' ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[Any]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , ) -> np.ndarray: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _a = to_numpy_array(__UpperCAmelCase ) if do_resize: _a = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: _a = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: _a = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) if do_normalize: _a = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) _a = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _a = do_resize if do_resize is not None else self.do_resize _a = resample if resample is not None else self.resample _a = do_center_crop if do_center_crop is not None else self.do_center_crop _a = do_rescale if do_rescale is not None else self.do_rescale _a = rescale_factor if rescale_factor is not None else self.rescale_factor _a = do_normalize if do_normalize is not None else self.do_normalize _a = image_mean if image_mean is not None else self.image_mean _a = image_std if image_std is not None else self.image_std _a = size if size is not None else self.size _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _a = crop_size if crop_size is not None else self.crop_size _a = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) _a = make_batched(__UpperCAmelCase ) _a = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] _a = {'''pixel_values''': videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
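# Hedged usage sketch (added, standalone): assuming the class above matches the
# public VideoMAE-style video processor in transformers, a clip is preprocessed
# as below; the random frames stand in for real video frames.
import numpy as np
from transformers import VideoMAEImageProcessor

video_processor = VideoMAEImageProcessor()
frames = [np.random.randint(0, 256, size=(360, 480, 3), dtype=np.uint8) for _ in range(8)]
video_batch = video_processor(frames, return_tensors="np")  # a flat list of frames becomes one video
print(video_batch["pixel_values"].shape)  # (1, 8, 3, 224, 224)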
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __snake_case = logging.get_logger(__name__) __snake_case = { '''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[str] = 'bart' A_ : Optional[Any] = ['past_key_values'] A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=50265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> Tuple: _a = vocab_size _a = max_position_embeddings _a = d_model _a = encoder_ffn_dim _a = encoder_layers _a = encoder_attention_heads _a = decoder_ffn_dim _a = decoder_layers _a = decoder_attention_heads _a = dropout _a = attention_dropout _a = activation_dropout _a = activation_function _a = init_std _a = encoder_layerdrop _a = decoder_layerdrop _a = classifier_dropout _a = use_cache _a = encoder_layers _a = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): _a = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCamelCase ( a__ ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a = {0: '''batch'''} _a = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''decoder_sequence'''} _a = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
_a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} else: _a = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: _a = super().outputs else: _a = super(__UpperCAmelCase , self ).outputs if self.use_past: _a , _a = self.num_layers for i in range(__UpperCAmelCase ): _a = {0: '''batch''', 2: '''past_sequence + sequence'''} _a = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs _a = seq_length if not self.use_past else 1 _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) _a = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} _a = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape _a = common_inputs['''decoder_input_ids'''].shape[1] _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = decoder_seq_length + 3 _a = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _a = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) _a = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _a , _a = self.num_layers _a = min(__UpperCAmelCase , __UpperCAmelCase ) _a = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers _a = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. 
_a = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a , _a = self.num_layers _a , _a = self.num_attention_heads _a = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _a = common_inputs['''attention_mask'''].dtype _a = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) _a = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _a = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size _a = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: _a = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": _a = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: _a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.task in ["default", "seq2seq-lm"]: _a = 
super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: _a = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
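# Hedged usage sketch (added, standalone): assuming the classes above are
# transformers' BartConfig and its ONNX export config, config-only usage is:
from transformers import BartConfig

bart_config = BartConfig()
print(bart_config.model_type, bart_config.d_model, bart_config.encoder_layers)  # bart 1024 12
print(bart_config.num_attention_heads)  # 16, resolved through the attribute_map above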
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=[1, 1, 2] , __UpperCAmelCase=1 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=8 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=3 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=False , ) -> Optional[Any]: _a = parent _a = batch_size _a = seq_length _a = is_training _a = use_input_mask _a = use_token_type_ids _a = use_labels _a = vocab_size _a = block_sizes _a = num_decoder_layers _a = d_model _a = n_head _a = d_head _a = d_inner _a = hidden_act _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = max_position_embeddings _a = type_vocab_size _a = 2 _a = num_labels _a = num_choices _a = scope _a = initializer_std # Used in the tests to check the size of the first attention layer _a = n_head # Used in the tests to check the size of the first hidden state _a = self.d_model # Used in the tests to check the number of output hidden states/attentions _a = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: _a = self.num_hidden_layers + 2 def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _a = None if self.use_input_mask: _a = random_attention_mask([self.batch_size, self.seq_length] ) _a = None if self.use_token_type_ids: _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _a = None _a = None _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _a = ids_tensor([self.batch_size] , self.num_choices ) _a = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = TFFunnelModel(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) _a = [input_ids, input_mask] _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _a = False _a = TFFunnelModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) _a = False _a = TFFunnelModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: _a = TFFunnelBaseModel(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) _a = [input_ids, input_mask] _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) _a = False _a = TFFunnelBaseModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) _a = False _a = TFFunnelBaseModel(config=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int: _a = TFFunnelForPreTraining(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self 
, __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = TFFunnelForMaskedLM(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> int: _a = self.num_labels _a = TFFunnelForSequenceClassification(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[str]: _a = self.num_choices _a = TFFunnelForMultipleChoice(config=__UpperCAmelCase ) _a = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _a = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _a = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) _a = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: _a = self.num_labels _a = TFFunnelForTokenClassification(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = TFFunnelForQuestionAnswering(config=__UpperCAmelCase ) _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} _a = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self ) -> Tuple: _a = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = config_and_inputs _a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __lowerCamelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Dict = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) A_ : str = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': 
TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) A_ : Union[str, Any] = False A_ : List[str] = False def _UpperCAmelCase ( self ) -> int: _a = TFFunnelModelTester(self ) _a = ConfigTester(self , config_class=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> List[Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) @require_tf class __lowerCamelCase ( a__ , unittest.TestCase ): '''simple docstring''' A_ : int = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) A_ : str = False A_ : Tuple = False def _UpperCAmelCase ( self ) -> str: _a = TFFunnelModelTester(self , base=__UpperCAmelCase ) _a = ConfigTester(self , config_class=__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> str: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
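# Hedged note (added): the classes above are standard unittest/pytest test
# suites; with TensorFlow installed they run via, e.g.:
#
#   python -m pytest test_modeling_tf_funnel.py -q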
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A_ ( _lowerCAmelCase : Dict ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x2_0000 and cp <= 0x2_a6df) # or (cp >= 0x2_a700 and cp <= 0x2_b73f) # or (cp >= 0x2_b740 and cp <= 0x2_b81f) # or (cp >= 0x2_b820 and cp <= 0x2_ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2_f800 and cp <= 0x2_fa1f) # ): # return True return False def A_ ( _lowerCAmelCase : str ): """simple docstring""" for char in word: _a = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def A_ ( _lowerCAmelCase : List[str] ): """simple docstring""" _a = set() for token in tokens: _a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _a = list(_lowerCAmelCase ) return word_list def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens _a = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _a = bert_tokens _a , _a = 0, len(_lowerCAmelCase ) while start < end: _a = True if is_chinese(bert_word[start] ): _a = min(end - start, _lowerCAmelCase ) for i in range(_lowerCAmelCase, 1, -1 ): _a = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1, start + i ): _a = '''##''' + bert_word[j] _a = start + i _a = False break if single_word: start += 1 return bert_word def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : LTP, _lowerCAmelCase : BertTokenizer ): """simple docstring""" _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = ltp_tokenizer.pipeline(lines[i : i + 1_00], tasks=['''cws'''] ).cws _a = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for i in range(0, len(_lowerCAmelCase ), 1_00 ): _a = bert_tokenizer(lines[i : i + 1_00], add_special_tokens=_lowerCAmelCase, truncation=_lowerCAmelCase, max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _a = [] for input_ids, chinese_word in zip(_lowerCAmelCase, _lowerCAmelCase ): _a = [] for id in input_ids: _a = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _a = add_sub_symbol(_lowerCAmelCase, _lowerCAmelCase ) _a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _a = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def A_ ( _lowerCAmelCase : Any ): """simple docstring""" with open(args.file_name, '''r''', encoding='''utf-8''' ) as f: _a = f.readlines() _a = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _a = LTP(args.ltp ) # faster in GPU device _a = BertTokenizer.from_pretrained(args.bert ) _a = prepare_ref(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) with open(args.save_path, '''w''', encoding='''utf-8''' ) as f: _a = [json.dumps(_lowerCAmelCase ) + '''\n''' for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) __snake_case = parser.parse_args() main(args)
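# Hedged usage sketch (added): the paths below are the argparse defaults from
# the code above; the script file name is assumed for illustration.
#
#   python run_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt
#
# The output is one JSON list per input line, marking which subword positions
# belong to multi-character Chinese words (for whole-word masking).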
320
1
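# A minimal standalone sketch of the whole-word idea implemented by add_sub_symbol
# above: given BERT sub-tokens and a set of Chinese words from a segmenter, prefix the
# non-initial characters of each matched word with "##". The token list and word set
# below are illustrative stand-ins for LTP output; the is_chinese guard is omitted for brevity.
def mark_subwords(bert_tokens, chinese_words):
    max_len = max((len(w) for w in chinese_words), default=0)
    tokens = list(bert_tokens)
    start, end = 0, len(tokens)
    while start < end:
        matched = False
        for i in range(min(end - start, max_len), 1, -1):  # try longest match first
            if "".join(tokens[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = "##" + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens

print(mark_subwords(["我", "喜", "欢", "你"], {"喜欢"}))  # ['我', '喜', '##欢', '你']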
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowerCamelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Any = StableDiffusionXLImgaImgPipeline A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} A_ : Any = PipelineTesterMixin.required_optional_params - {'latents'} A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS A_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _UpperCAmelCase ( self ) -> int: torch.manual_seed(0 ) _a = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) _a = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) _a = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) _a = CLIPTextModel(__UpperCAmelCase ) _a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase ) _a = CLIPTextModelWithProjection(__UpperCAmelCase ) _a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCAmelCase ) _a = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[int]: _a = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _a = image / 2 + 0.5 if str(__UpperCAmelCase ).startswith('''mps''' ): _a = torch.manual_seed(__UpperCAmelCase ) else: _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _a = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, 
'''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def _UpperCAmelCase ( self ) -> Tuple: _a = '''cpu''' # ensure determinism for the device-dependent torch.Generator _a = self.get_dummy_components() _a = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) _a = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _a = self.get_dummy_inputs(__UpperCAmelCase ) _a = sd_pipe(**__UpperCAmelCase ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _a = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> Tuple: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def _UpperCAmelCase ( self ) -> List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def _UpperCAmelCase ( self ) -> List[Any]: pass def _UpperCAmelCase ( self ) -> str: _a = self.get_dummy_components() _a = StableDiffusionXLImgaImgPipeline(**__UpperCAmelCase ) _a = sd_pipe.to(__UpperCAmelCase ) _a = sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # forward without prompt embeds _a = self.get_dummy_inputs(__UpperCAmelCase ) _a = 3 * ['''this is a negative prompt'''] _a = negative_prompt _a = 3 * [inputs['''prompt''']] _a = sd_pipe(**__UpperCAmelCase ) _a = output.images[0, -3:, -3:, -1] # forward with prompt embeds _a = self.get_dummy_inputs(__UpperCAmelCase ) _a = 3 * ['''this is a negative prompt'''] _a = 3 * [inputs.pop('''prompt''' )] ( ( _a ) , ( _a ) , ( _a ) , ( _a ) , ) = sd_pipe.encode_prompt(__UpperCAmelCase , negative_prompt=__UpperCAmelCase ) _a = sd_pipe( **__UpperCAmelCase , prompt_embeds=__UpperCAmelCase , negative_prompt_embeds=__UpperCAmelCase , pooled_prompt_embeds=__UpperCAmelCase , negative_pooled_prompt_embeds=__UpperCAmelCase , ) _a = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> Optional[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=0 ) -> int: _a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _a = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 4, 64, 64) ) _a = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase ) _a = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _a = self.get_inputs(__UpperCAmelCase ) _a = pipe(**__UpperCAmelCase ).images _a = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _a = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
320
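# The slice assertions in the pipeline test above follow a common diffusers pattern:
# compare a small corner of the generated image against hard-coded reference values
# instead of the full array. A self-contained illustration with synthetic data (the
# expected values here are made up for demonstration):
import numpy as np

image = np.full((1, 32, 32, 3), 0.5, dtype=np.float32)  # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1]                     # bottom-right 3x3 of one channel
expected_slice = np.full(9, 0.5, dtype=np.float32)
assert image.shape == (1, 32, 32, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2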
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'gptj' A_ : Optional[int] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , __UpperCAmelCase=50400 , __UpperCAmelCase=2048 , __UpperCAmelCase=4096 , __UpperCAmelCase=28 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Union[str, Any]: _a = vocab_size _a = n_positions _a = n_embd _a = n_layer _a = n_head _a = n_inner _a = rotary_dim _a = activation_function _a = resid_pdrop _a = embd_pdrop _a = attn_pdrop _a = layer_norm_epsilon _a = initializer_range _a = use_cache _a = bos_token_id _a = eos_token_id super().__init__( bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase ) class __lowerCamelCase ( a__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "default" , __UpperCAmelCase = None , __UpperCAmelCase = False , ) -> Optional[Any]: super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? 
_a = 0 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) _a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def _UpperCAmelCase ( self ) -> int: return self._config.n_layer @property def _UpperCAmelCase ( self ) -> int: return self._config.n_head def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) -> Mapping[str, Any]: _a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() _a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _a , _a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _a = seqlen + 2 _a = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] _a = common_inputs['''attention_mask'''] if self.use_past: _a = ordered_inputs['''attention_mask'''].dtype _a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def _UpperCAmelCase ( self ) -> int: return 13
320
1
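# The ONNX config above builds dummy past_key_values as one (key, value) pair of zero
# tensors per decoder layer, each shaped (batch, num_heads, past_len, head_dim), and
# widens the attention mask to cover past plus current positions. A small sketch of
# that shape logic with illustrative sizes (not the real GPT-J dimensions):
import torch

batch, num_heads, num_layers, hidden_size, seqlen = 2, 4, 3, 32, 5
past_len = seqlen + 2  # the config above deliberately uses a different past length
shape = (batch, num_heads, past_len, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
attention_mask = torch.ones(batch, past_len + seqlen, dtype=torch.int64)
print(len(past_key_values), past_key_values[0][0].shape, attention_mask.shape)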
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : str = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Dict = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> 
Optional[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Union[str, Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Tuple = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Optional[Any] = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: requires_backends(cls , ['''flax'''] ) class __lowerCamelCase ( metaclass=a__ ): '''simple docstring''' A_ : Any = ['flax'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _UpperCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ['''flax'''] )
320
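# Every class above follows the same "dummy object" pattern: a placeholder that is
# importable even when flax is missing but raises as soon as it is constructed or
# accessed. A minimal sketch of the mechanism (toy reimplementation, not the actual
# diffusers utilities):
class DummyObject(type):
    def __getattr__(cls, name):
        raise ImportError("This object requires the flax backend, which is not installed.")

class FlaxThing(metaclass=DummyObject):
    _backends = ["flax"]
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires the flax backend, which is not installed.")

try:
    FlaxThing()
except ImportError as e:
    print(e)  # raised lazily, so importing the module itself never fails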
"""simple docstring""" import os import sys import unittest __snake_case = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __snake_case = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __snake_case = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = get_test_to_tester_mapping(__UpperCAmelCase ) _a = {'''BertModelTest''': '''BertModelTester'''} _a = { '''BlipModelTest''': '''BlipModelTester''', '''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''', '''BlipTextModelTest''': '''BlipTextModelTester''', '''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''', '''BlipVQAModelTest''': '''BlipVQAModelTester''', '''BlipVisionModelTest''': '''BlipVisionModelTester''', } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = get_model_to_test_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTest'''], '''BertForMultipleChoice''': ['''BertModelTest'''], '''BertForNextSentencePrediction''': ['''BertModelTest'''], '''BertForPreTraining''': ['''BertModelTest'''], '''BertForQuestionAnswering''': ['''BertModelTest'''], '''BertForSequenceClassification''': ['''BertModelTest'''], '''BertForTokenClassification''': ['''BertModelTest'''], '''BertLMHeadModel''': ['''BertModelTest'''], '''BertModel''': ['''BertModelTest'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''], '''BlipModel''': ['''BlipModelTest'''], '''BlipTextModel''': ['''BlipTextModelTest'''], '''BlipVisionModel''': ['''BlipVisionModelTest'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = get_model_to_tester_mapping(__UpperCAmelCase ) _a = { '''BertForMaskedLM''': ['''BertModelTester'''], '''BertForMultipleChoice''': ['''BertModelTester'''], '''BertForNextSentencePrediction''': ['''BertModelTester'''], '''BertForPreTraining''': ['''BertModelTester'''], '''BertForQuestionAnswering''': ['''BertModelTester'''], '''BertForSequenceClassification''': ['''BertModelTester'''], '''BertForTokenClassification''': ['''BertModelTester'''], '''BertLMHeadModel''': ['''BertModelTester'''], '''BertModel''': ['''BertModelTester'''], } _a = { '''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''], '''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''], '''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''], '''BlipModel''': ['''BlipModelTester'''], '''BlipTextModel''': ['''BlipTextModelTester'''], '''BlipVisionModel''': ['''BlipVisionModelTester'''], } self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase ) 
self.assertEqual(get_test_info.to_json(__UpperCAmelCase ) , __UpperCAmelCase )
320
1
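# get_test_to_tester_mapping above is exercised against real test files; conceptually
# it pairs each *ModelTest class with the tester class its setUp instantiates. A toy
# illustration of that relationship (class names are illustrative, nothing here is
# parsed from actual test files):
class BertModelTester: ...

class BertModelTest:
    tester_class = BertModelTester

test_classes = [BertModelTest]
mapping = {t.__name__: t.tester_class.__name__ for t in test_classes}
assert mapping == {"BertModelTest": "BertModelTester"}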
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'EncodecFeatureExtractor' A_ : Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) _a = self.feature_extractor _a = False def _UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ) -> Union[str, Any]: return self.tokenizer.get_decoder_prompt_ids(task=__UpperCAmelCase , language=__UpperCAmelCase , no_timestamps=__UpperCAmelCase ) def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase ) _a = kwargs.pop('''audio''' , __UpperCAmelCase ) _a = kwargs.pop('''sampling_rate''' , __UpperCAmelCase ) _a = kwargs.pop('''text''' , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _a = args[0] _a = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if text is not None: _a = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) if audio is not None: _a = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) if audio is None: return inputs elif text is None: return audio_inputs else: _a = audio_inputs['''input_values'''] if "padding_mask" in audio_inputs: _a = audio_inputs['''padding_mask'''] return inputs def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: _a = kwargs.pop('''audio''' , __UpperCAmelCase ) _a = kwargs.pop('''padding_mask''' , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _a = args[0] _a = args[1:] if audio_values is not None: return self._decode_audio(__UpperCAmelCase , padding_mask=__UpperCAmelCase ) else: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[np.ndarray]: _a = to_numpy(__UpperCAmelCase ) _a , _a , _a = audio_values.shape if padding_mask is None: return list(__UpperCAmelCase ) _a = to_numpy(__UpperCAmelCase ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) _a = seq_len - padding_mask.shape[-1] _a = 1 - self.feature_extractor.padding_value _a = np.pad(__UpperCAmelCase , ((0, 0), (0, difference)) , '''constant''' , constant_values=__UpperCAmelCase ) _a = audio_values.tolist() for i in range(__UpperCAmelCase ): _a = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] _a = sliced_audio.reshape(__UpperCAmelCase , -1 ) return audio_values
320
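# _decode_audio above pads the padding mask with the *non*-padding value so generated
# samples are never sliced away, then crops each waveform back to its true length. A
# numpy sketch of that trick with toy values (padding_value assumed to be 0.0):
import numpy as np

audio_values = np.arange(12, dtype=np.float32).reshape(2, 1, 6)  # (batch, channels, seq_len)
padding_mask = np.array([[1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1]])  # 0 marks padding
padding_value = 0.0
difference = audio_values.shape[-1] - padding_mask.shape[-1]  # 0 here, >0 after generation
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
sliced = [
    np.asarray(audio_values[i])[padding_mask[i][None, :] != padding_value].reshape(1, -1)
    for i in range(audio_values.shape[0])
]
print([s.shape for s in sliced])  # [(1, 4), (1, 6)]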
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowerCamelCase : '''simple docstring''' @staticmethod def _UpperCAmelCase ( *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple: pass def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def A_ ( _lowerCAmelCase : Image ): """simple docstring""" _a = np.array(_lowerCAmelCase ) _a = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' A_ : Any = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) A_ : str = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _a = MaskGenerationPipeline(model=__UpperCAmelCase , image_processor=__UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _UpperCAmelCase ( self ) -> List[str]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> int: _a = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) _a = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.021}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9967}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.993}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9909}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9879}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9834}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9716}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9612}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9599}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 
640)}, '''scores''': 0.9552}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9532}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9516}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9499}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9483}, {'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9464}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.943}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9408}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9335}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9326}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9262}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8999}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8986}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8984}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8873}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8871} ] , ) # fmt: on @require_torch @slow def _UpperCAmelCase ( self ) -> Any: _a = '''facebook/sam-vit-huge''' _a = pipeline('''mask-generation''' , model=__UpperCAmelCase ) _a = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing _a = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(__UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0444}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0210}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0167}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0132}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0053}, ] , )
320
1
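# The hashing helpers above shorten large binary masks to a stable fingerprint so the
# expected outputs stay readable. A standalone version of the same idea, taking a
# numpy mask directly instead of a PIL image (the digest depends only on the bytes):
import hashlib
import numpy as np

def mask_fingerprint(mask: np.ndarray) -> dict:
    digest = hashlib.md5(mask.tobytes()).hexdigest()[:10]
    return {"hash": digest, "shape": mask.shape}

mask = np.zeros((480, 640), dtype=np.uint8)
print(mask_fingerprint(mask))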
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance __snake_case = 637_8137.0 __snake_case = 635_6752.31_4245 __snake_case = 6378137 def A_ ( _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float, _lowerCAmelCase : float ): """simple docstring""" _a = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _a = atan((1 - flattening) * tan(radians(_lowerCAmelCase ) ) ) _a = atan((1 - flattening) * tan(radians(_lowerCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _a = haversine_distance(_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values _a = (b_lata + b_lata) / 2 _a = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _a = (sin(_lowerCAmelCase ) ** 2) * (cos(_lowerCAmelCase ) ** 2) _a = cos(sigma / 2 ) ** 2 _a = (sigma - sin(_lowerCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _a = (cos(_lowerCAmelCase ) ** 2) * (sin(_lowerCAmelCase ) ** 2) _a = sin(sigma / 2 ) ** 2 _a = (sigma + sin(_lowerCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
320
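# The central-angle input to the Lambert formula above comes from a haversine
# distance imported from a sibling module; a minimal haversine is inlined here so the
# example runs standalone. Coordinates are San Francisco and Yosemite; the result is
# in meters (about 254 km).
from math import asin, cos, radians, sin, sqrt

def haversine_distance(lat1, lon1, lat2, lon2, radius=6378137.0):
    d_lat, d_lon = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_lat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(d_lon / 2) ** 2
    return 2 * radius * asin(sqrt(h))

print(haversine_distance(37.774856, -122.424227, 37.864742, -119.537521))  # ~254352.0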
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class __lowerCamelCase : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=9 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=8 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.002 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: _a = parent _a = batch_size _a = encoder_seq_length _a = decoder_seq_length # For common tests _a = self.decoder_seq_length _a = is_training _a = use_attention_mask _a = use_labels _a = vocab_size _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = d_ff _a = relative_attention_num_buckets _a = dropout_rate _a = initializer_factor _a = eos_token_id _a = pad_token_id _a = decoder_start_token_id _a = None _a = decoder_layers def _UpperCAmelCase ( self ) -> Dict: return TaConfig.from_pretrained('''google/umt5-base''' ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]: if attention_mask is None: _a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCAmelCase ) if decoder_head_mask is None: _a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) if cross_attn_head_mask is None: _a = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__UpperCAmelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _UpperCAmelCase ( self ) -> Tuple: _a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) _a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input _a = input_ids.clamp(self.pad_token_id + 1 ) _a = decoder_input_ids.clamp(self.pad_token_id + 1 ) _a = self.get_config() _a = config.num_attention_heads _a = self.prepare_inputs_dict(__UpperCAmelCase , 
__UpperCAmelCase , __UpperCAmelCase ) return config, input_dict def _UpperCAmelCase ( self ) -> int: _a , _a = self.prepare_config_and_inputs() return config, inputs_dict def _UpperCAmelCase ( self ) -> Tuple: return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self ) -> List[str]: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Dict: _a = UMTaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _a = model( input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , decoder_attention_mask=__UpperCAmelCase , ) _a = model(input_ids=__UpperCAmelCase , decoder_input_ids=__UpperCAmelCase ) _a = result.last_hidden_state _a = result.past_key_values _a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__UpperCAmelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[Any]: _a = UMTaModel(config=__UpperCAmelCase ).get_decoder().to(__UpperCAmelCase ).eval() # first forward pass _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _a = model(__UpperCAmelCase ) _a = model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _a , _a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _a = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and _a = torch.cat([input_ids, next_tokens] , dim=-1 ) _a = model(__UpperCAmelCase )['''last_hidden_state'''] _a = model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )['''last_hidden_state'''] # select random slice _a = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a = output_from_no_past[:, -1, random_slice_idx].detach() _a 
= output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ) -> Union[str, Any]: _a = UMTaModel(config=__UpperCAmelCase ).to(__UpperCAmelCase ).half().eval() _a = model(**__UpperCAmelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__UpperCAmelCase ).any().item() ) @require_torch class __lowerCamelCase ( a__ , a__ , a__ , unittest.TestCase ): '''simple docstring''' A_ : Optional[Any] = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) A_ : Optional[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else () A_ : int = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) A_ : str = True A_ : List[str] = False A_ : List[Any] = False A_ : str = True A_ : List[str] = True # The small UMT5 model needs higher percentages for CPU/MP tests A_ : Optional[Any] = [0.8, 0.9] def _UpperCAmelCase ( self ) -> Tuple: _a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def _UpperCAmelCase ( self ) -> int: _a = self.model_tester.prepare_config_and_inputs() _a = UMTaModel(config_and_inputs[0] ).to(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__UpperCAmelCase , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: _a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] _a = self.model_tester.prepare_config_and_inputs() _a = config_and_inputs[0] _a = UMTaForConditionalGeneration(__UpperCAmelCase ).eval() model.to(__UpperCAmelCase ) _a = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__UpperCAmelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ), } for attn_name, (name, mask) in zip(__UpperCAmelCase , head_masking.items() ): _a = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": _a = torch.ones( config.num_decoder_layers , config.num_heads , device=__UpperCAmelCase ) _a = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__UpperCAmelCase , return_dict_in_generate=__UpperCAmelCase , **__UpperCAmelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step _a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we 
keep hitting edge cases.''' ) def _UpperCAmelCase ( self ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def _UpperCAmelCase ( self ) -> Optional[int]: _a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__UpperCAmelCase ).to(__UpperCAmelCase ) _a = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__UpperCAmelCase , legacy=__UpperCAmelCase ) _a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] _a = tokenizer(__UpperCAmelCase , return_tensors='''pt''' , padding=__UpperCAmelCase ).input_ids # fmt: off _a = torch.tensor( [ [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(__UpperCAmelCase , __UpperCAmelCase ) _a = model.generate(input_ids.to(__UpperCAmelCase ) ) _a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] _a = tokenizer.batch_decode(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
320
1
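# The head-masking test above zeroes whole attention heads and then checks that the
# returned attention weights sum to zero. The mask layout is simply
# (num_layers, num_heads); a small sketch with illustrative sizes:
import torch

num_layers, num_heads = 5, 4
head_mask = torch.zeros(num_layers, num_heads)         # disable every encoder head
decoder_head_mask = torch.ones(num_layers, num_heads)  # T5-style models expect this alongside head_mask
print(head_mask.shape, head_mask.sum().item())  # torch.Size([5, 4]) 0.0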
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @property def _UpperCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _a = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def _UpperCAmelCase ( self ) -> Tuple: _a = self.dummy_uncond_unet _a = ScoreSdeVeScheduler() _a = ScoreSdeVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) sde_ve.to(__UpperCAmelCase ) sde_ve.set_progress_bar_config(disable=__UpperCAmelCase ) _a = torch.manual_seed(0 ) _a = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__UpperCAmelCase ).images _a = torch.manual_seed(0 ) _a = sde_ve(num_inference_steps=2 , output_type='''numpy''' , generator=__UpperCAmelCase , return_dict=__UpperCAmelCase )[ 0 ] _a = image[0, -3:, -3:, -1] _a = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _a = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> str: _a = '''google/ncsnpp-church-256''' _a = UNetaDModel.from_pretrained(__UpperCAmelCase ) _a = ScoreSdeVeScheduler.from_pretrained(__UpperCAmelCase ) _a = ScoreSdeVePipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) sde_ve.to(__UpperCAmelCase ) sde_ve.set_progress_bar_config(disable=__UpperCAmelCase ) _a = torch.manual_seed(0 ) _a = sde_ve(num_inference_steps=10 , output_type='''numpy''' , generator=__UpperCAmelCase ).images _a = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _a = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
320
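# Both pipeline calls above reseed a torch.Generator so the stochastic SDE sampler is
# reproducible: with the same seed, identical noise is drawn and the two runs can be
# compared elementwise. A minimal demonstration of that pattern:
import torch

gen = torch.manual_seed(0)
a = torch.randn(2, 3, generator=gen)
gen = torch.manual_seed(0)
b = torch.randn(2, 3, generator=gen)
assert torch.equal(a, b)  # same seed, same draws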
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Tuple: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> int: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _a = [[w, v]] if not self.graph.get(__UpperCAmelCase ): _a = [] def _UpperCAmelCase ( self ) -> int: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[str]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _a = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s _a = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _UpperCAmelCase ( self ) -> Optional[int]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = 
len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Any: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Optional[Any]: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin class __lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _a = {} def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Dict: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _a = [[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _a = [[w, u]] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _a = [] _a = [] if s == -2: _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = ss # check if se have reached 
the starting point if len(__UpperCAmelCase ) == 0: return visited def _UpperCAmelCase ( self , __UpperCAmelCase=-1 ) -> Tuple: if c == -1: _a = floor(random() * 10000 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): _a = floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _a = deque() _a = [] if s == -2: _a = list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _a = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Dict: return len(self.graph[u] ) def _UpperCAmelCase ( self ) -> int: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: _a = [] _a = [] _a = list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _a = -2 _a = [] _a = s _a = False _a = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _a = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _a = len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _a = node[1] break # check if all the children are visited if s == ss: stack.pop() _a = True if len(__UpperCAmelCase ) != 0: _a = stack[len(__UpperCAmelCase ) - 1] else: _a = False indirect_parents.append(__UpperCAmelCase ) _a = s _a = ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _UpperCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _UpperCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Tuple: _a = time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _a = time() return end - begin def _UpperCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _a = time() self.bfs(__UpperCAmelCase ) _a = time() return end - begin
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : str = 'MCTCTFeatureExtractor' A_ : Tuple = 'AutoTokenizer' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) _a = self.feature_extractor _a = False def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*__UpperCAmelCase , **__UpperCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _a = kwargs.pop('''raw_speech''' ) else: _a = kwargs.pop('''audio''' , __UpperCAmelCase ) _a = kwargs.pop('''sampling_rate''' , __UpperCAmelCase ) _a = kwargs.pop('''text''' , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _a = args[0] _a = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _a = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None: _a = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _a = encodings['''input_ids'''] return inputs def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*__UpperCAmelCase , **__UpperCAmelCase ) _a = kwargs.pop('''input_features''' , __UpperCAmelCase ) _a = kwargs.pop('''labels''' , __UpperCAmelCase ) if len(__UpperCAmelCase ) > 0: _a = args[0] _a = args[1:] if input_features is not None: _a = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) if labels is not None: _a = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase ) if labels is None: return input_features elif input_features is None: return labels else: _a = labels['''input_ids'''] return input_features def _UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @contextmanager def _UpperCAmelCase ( self ) -> List[Any]: warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _a = True _a = self.tokenizer yield _a = self.feature_extractor _a = False
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Dict = 'unispeech' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=320 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , __UpperCAmelCase=100 , __UpperCAmelCase=256 , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=80 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=0.5 , **__UpperCAmelCase , ) -> Union[str, Any]: super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = list(__UpperCAmelCase ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layerdrop _a = layer_norm_eps _a = initializer_range _a = num_ctc_classes _a = vocab_size _a = do_stable_layer_norm _a = use_weighted_layer_sum _a = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _a = num_codevectors_per_group _a = num_codevector_groups _a = contrastive_logits_temperature _a = feat_quantizer_dropout _a = num_negatives _a = codevector_dim _a = proj_codevector_dim _a = diversity_loss_weight # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # pretraining loss _a = replace_prob @property def _UpperCAmelCase ( self ) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1 )
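
# Worked example for the property above: it multiplies the conv strides
# together, which is the feature encoder's overall downsampling factor. With
# the default conv_stride from the signature, (5, 2, 2, 2, 2, 2, 2), this is a
# minimal, self-contained check:
import functools
import operator

_ratio = functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)
assert _ratio == 320  # one encoder frame per 320 samples (20 ms at 16 kHz)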
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __snake_case = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: __snake_case = None __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } __snake_case = { '''google/rembert''': 256, } __snake_case = '''▁''' class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : Optional[Any] = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : List[Any] = RemBertTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]: # Mask token behave like a normal word, i.e. include the space before it _a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , ) _a = do_lower_case _a = remove_space _a = keep_accents _a = vocab_file _a = False if not self.vocab_file else True def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _a = [self.sep_token_id] _a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) ) return _a = os.path.join( 
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ): copyfile(self.vocab_file , __UpperCAmelCase ) return (out_vocab_file,)
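
# Worked example for the special-tokens mask built above: [CLS]/[SEP]
# positions are marked 1 and sequence tokens 0, so a single three-token
# sequence becomes [CLS] x x x [SEP]:
_token_ids_a = [11, 12, 13]  # arbitrary example ids
_mask = [1] + ([0] * len(_token_ids_a)) + [1]
assert _mask == [1, 0, 0, 0, 1]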
"""simple docstring""" import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask __snake_case = logging.getLogger(__name__) class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : List[Any] = 'token-classification' def __init__( self , __UpperCAmelCase ) -> List[Any]: if type(__UpperCAmelCase ) == dict: _a = Namespace(**__UpperCAmelCase ) _a = import_module('''tasks''' ) try: _a = getattr(__UpperCAmelCase , hparams.task_type ) _a = token_classification_task_clazz() except AttributeError: raise ValueError( F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) _a = self.token_classification_task.get_labels(hparams.labels ) _a = CrossEntropyLoss().ignore_index super().__init__(__UpperCAmelCase , len(self.labels ) , self.mode ) def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Optional[Any]: return self.model(**__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type != "distilbert": _a = ( batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None ) # XLM and RoBERTa don"t use token_type_ids _a = self(**__UpperCAmelCase ) _a = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def _UpperCAmelCase ( self ) -> List[Any]: _a = self.hparams for mode in ["train", "dev", "test"]: _a = self._feature_file(__UpperCAmelCase ) if os.path.exists(__UpperCAmelCase ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , __UpperCAmelCase ) _a = torch.load(__UpperCAmelCase ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) _a = self.token_classification_task.read_examples_from_file(args.data_dir , __UpperCAmelCase ) _a = self.token_classification_task.convert_examples_to_features( __UpperCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__UpperCAmelCase , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info('''Saving features into cached file %s''' , __UpperCAmelCase ) torch.save(__UpperCAmelCase , __UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ) -> DataLoader: _a = self._feature_file(__UpperCAmelCase ) logger.info('''Loading features from cached file %s''' , __UpperCAmelCase ) _a = torch.load(__UpperCAmelCase ) _a = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) _a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: _a = torch.tensor([f.token_type_ids 
for f in features] , dtype=torch.long ) else: _a = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) _a = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , batch_size=__UpperCAmelCase ) def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: """Compute validation""" "" _a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type != "distilbert": _a = ( batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None ) # XLM and RoBERTa don"t use token_type_ids _a = self(**__UpperCAmelCase ) _a , _a = outputs[:2] _a = logits.detach().cpu().numpy() _a = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _a = torch.stack([x['''val_loss'''] for x in outputs] ).mean() _a = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) _a = np.argmax(__UpperCAmelCase , axis=2 ) _a = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) _a = dict(enumerate(self.labels ) ) _a = [[] for _ in range(out_label_ids.shape[0] )] _a = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) _a = { '''val_loss''': val_loss_mean, '''accuracy_score''': accuracy_score(__UpperCAmelCase , __UpperCAmelCase ), '''precision''': precision_score(__UpperCAmelCase , __UpperCAmelCase ), '''recall''': recall_score(__UpperCAmelCase , __UpperCAmelCase ), '''f1''': fa_score(__UpperCAmelCase , __UpperCAmelCase ), } _a = dict(results.items() ) _a = results return ret, preds_list, out_label_list def _UpperCAmelCase ( self , __UpperCAmelCase ) -> str: # when stable _a , _a , _a = self._eval_end(__UpperCAmelCase ) _a = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any: # updating to test_epoch_end instead of deprecated test_end _a , _a , _a = self._eval_end(__UpperCAmelCase ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 _a = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: # Add NER specific options BaseTransformer.add_model_specific_args(__UpperCAmelCase , __UpperCAmelCase ) parser.add_argument( '''--task_type''' , default='''NER''' , type=__UpperCAmelCase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=__UpperCAmelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--labels''' , default='''''' , type=__UpperCAmelCase , help='''Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used.''' , ) parser.add_argument( '''--gpus''' , default=0 , type=__UpperCAmelCase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser if __name__ == "__main__": __snake_case = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) __snake_case = NERTransformer.add_model_specific_args(parser, os.getcwd()) __snake_case = parser.parse_args() __snake_case = NERTransformer(args) __snake_case = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 __snake_case = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True)) __snake_case = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
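
# Example invocation, assuming this script is saved as `run_ner.py`. The flags
# --task_type, --max_seq_length, --labels, --gpus and --overwrite_cache are
# defined above; --data_dir, --output_dir and --do_predict are assumed to come
# from the shared add_generic_args/BaseTransformer helpers imported at the top:
#
#     python run_ner.py --data_dir ./data --labels ./labels.txt \
#         --task_type NER --max_seq_length 128 --output_dir ./out --do_predict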
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)