code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class lowerCAmelCase_ ( _snake_case ): _UpperCamelCase : Tuple = 'char' _UpperCamelCase : Optional[Any] = 'bpe' _UpperCamelCase : Tuple = 'wp' UpperCamelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class lowerCAmelCase_ ( _snake_case ): _UpperCamelCase : int = ['image_processor', 'char_tokenizer'] _UpperCamelCase : int = 'ViTImageProcessor' _UpperCamelCase : Optional[Any] = 'MgpstrTokenizer' def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : int = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _lowerCAmelCase , ) _lowercase : Any = kwargs.pop('feature_extractor' ) _lowercase : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) _lowercase : Any = tokenizer _lowercase : Any = AutoTokenizer.from_pretrained('gpt2' ) _lowercase : int = AutoTokenizer.from_pretrained('bert-base-uncased' ) super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: _lowercase : Dict = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None: _lowercase : Any = self.char_tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is None: return inputs elif images is None: return encodings else: _lowercase : Optional[Any] = encodings['input_ids'] return inputs def __a ( self , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : Any = sequences _lowercase : List[Any] = char_preds.size(0 ) _lowercase , _lowercase : Union[str, Any] = self._decode_helper(_lowerCAmelCase , 'char' ) _lowercase , _lowercase : Tuple = self._decode_helper(_lowerCAmelCase , 'bpe' ) _lowercase , _lowercase : int = self._decode_helper(_lowerCAmelCase , 'wp' ) _lowercase : str = [] _lowercase : Optional[Any] = [] for i in range(_lowerCAmelCase ): _lowercase : Dict = [char_scores[i], bpe_scores[i], wp_scores[i]] _lowercase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]] _lowercase : Union[str, Any] = scores.index(max(_lowerCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) _lowercase : Optional[int] = {} _lowercase : Tuple = final_strs _lowercase : int = final_scores _lowercase : int = char_strs _lowercase : int = bpe_strs _lowercase : Optional[int] = wp_strs return out def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): if format == DecodeType.CHARACTER: _lowercase : Union[str, Any] = self.char_decode _lowercase : Tuple = 1 _lowercase : Tuple = '[s]' elif format == DecodeType.BPE: _lowercase : Tuple = self.bpe_decode _lowercase : int = 2 _lowercase : Dict = '#' elif format == DecodeType.WORDPIECE: _lowercase : Dict = self.wp_decode _lowercase : List[Any] = 1_0_2 _lowercase : Union[str, Any] = '[SEP]' else: raise ValueError(F"""Format {format} is not supported.""" ) _lowercase , _lowercase : str = [], [] _lowercase : Dict = pred_logits.size(0 ) _lowercase : Any = pred_logits.size(1 ) _lowercase , _lowercase : Any = pred_logits.topk(1 , dim=-1 , largest=_lowerCAmelCase , sorted=_lowerCAmelCase ) _lowercase : Union[str, Any] = preds_index.view(-1 , _lowerCAmelCase )[:, 1:] _lowercase : Optional[Any] = decoder(_lowerCAmelCase ) _lowercase , _lowercase : Tuple = torch.nn.functional.softmax(_lowerCAmelCase , dim=2 ).max(dim=2 ) _lowercase : str = preds_max_prob[:, 1:] for index in range(_lowerCAmelCase ): _lowercase : int = preds_str[index].find(_lowerCAmelCase ) _lowercase : int = preds_str[index][:pred_eos] _lowercase : str = preds_index[index].cpu().tolist() _lowercase : List[str] = pred_index.index(_lowerCAmelCase ) if eos_token in pred_index else -1 _lowercase : str = preds_max_prob[index][: pred_eos_index + 1] _lowercase : List[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_lowerCAmelCase ) conf_scores.append(_lowerCAmelCase ) return dec_strs, conf_scores def __a ( self , _lowerCAmelCase ): _lowercase : Any = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(_lowerCAmelCase )] return decode_strs def __a ( self , _lowerCAmelCase ): return self.bpe_tokenizer.batch_decode(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase ): _lowercase : Optional[Any] = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(_lowerCAmelCase )] return decode_strs
713
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class lowerCAmelCase_ : def __init__( self ): _lowercase : int = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ): if self.graph.get(_UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _lowercase : Optional[int] = [[w, v]] if not self.graph.get(_UpperCAmelCase ): _lowercase : List[str] = [] def __a ( self ): return list(self.graph ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): if self.graph.get(_UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_UpperCAmelCase ) def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ): if s == d: return [] _lowercase : Tuple = [] _lowercase : str = [] if s == -2: _lowercase : Union[str, Any] = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : Dict = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowercase : int = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_UpperCAmelCase ) != 0: _lowercase : List[Any] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : List[Any] = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return visited def __a ( self , _lowerCAmelCase=-1 ): if c == -1: _lowercase : Optional[Any] = floor(random() * 1_0_0_0_0 ) + 1_0 for i in range(_UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 1_0_2 ) + 1 ): _lowercase : Optional[int] = floor(random() * c ) + 1 if n != i: self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 ) def __a ( self , _lowerCAmelCase=-2 ): _lowercase : Dict = deque() _lowercase : Union[str, Any] = [] if s == -2: _lowercase : List[Any] = list(self.graph )[0] d.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) while d: _lowercase : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def __a ( self , _lowerCAmelCase ): _lowercase : Optional[int] = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def __a ( self , _lowerCAmelCase ): return len(self.graph[u] ) def __a ( self , _lowerCAmelCase=-2 ): _lowercase : Union[str, Any] = [] _lowercase : List[str] = [] if s == -2: _lowercase : int = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : List[str] = s _lowercase : Optional[int] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : List[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowercase : List[str] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(_UpperCAmelCase ) != 0: _lowercase : List[Any] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : Tuple = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return sorted_nodes def __a ( self ): _lowercase : List[Any] = [] _lowercase : Optional[Any] = [] _lowercase : Tuple = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : Optional[int] = -2 _lowercase : Dict = [] _lowercase : str = s _lowercase : Optional[int] = False _lowercase : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowercase : Any = len(_UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowercase : List[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowercase : Optional[Any] = True if len(_UpperCAmelCase ) != 0: _lowercase : List[str] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : Optional[Any] = False indirect_parents.append(_UpperCAmelCase ) _lowercase : Dict = s _lowercase : Union[str, Any] = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return list(_UpperCAmelCase ) def __a ( self ): _lowercase : str = [] _lowercase : Any = [] _lowercase : List[str] = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : List[str] = -2 _lowercase : Optional[Any] = [] _lowercase : List[Any] = s _lowercase : Optional[Any] = False _lowercase : Dict = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : Dict = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowercase : int = len(_UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowercase : int = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowercase : List[str] = True if len(_UpperCAmelCase ) != 0: _lowercase : Optional[Any] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : Dict = False indirect_parents.append(_UpperCAmelCase ) _lowercase : Optional[int] = s _lowercase : Union[str, Any] = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return False def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ): _lowercase : str = time() self.dfs(_UpperCAmelCase , _UpperCAmelCase ) _lowercase : Any = time() return end - begin def __a ( self , _lowerCAmelCase=-2 ): _lowercase : Dict = time() self.bfs(_UpperCAmelCase ) _lowercase : str = time() return end - begin class lowerCAmelCase_ : def __init__( self ): _lowercase : str = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ): if self.graph.get(_UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _lowercase : Union[str, Any] = [[w, v]] # add the other way if self.graph.get(_UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _lowercase : Dict = [[w, u]] def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): if self.graph.get(_UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(_UpperCAmelCase ) # the other way round if self.graph.get(_UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(_UpperCAmelCase ) def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ): if s == d: return [] _lowercase : Dict = [] _lowercase : List[Any] = [] if s == -2: _lowercase : Tuple = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(_UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowercase : Union[str, Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(_UpperCAmelCase ) != 0: _lowercase : Optional[Any] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : Union[str, Any] = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return visited def __a ( self , _lowerCAmelCase=-1 ): if c == -1: _lowercase : str = floor(random() * 1_0_0_0_0 ) + 1_0 for i in range(_UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 1_0_2 ) + 1 ): _lowercase : List[Any] = floor(random() * c ) + 1 if n != i: self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 ) def __a ( self , _lowerCAmelCase=-2 ): _lowercase : Dict = deque() _lowercase : Union[str, Any] = [] if s == -2: _lowercase : int = list(self.graph )[0] d.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) while d: _lowercase : Optional[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def __a ( self , _lowerCAmelCase ): return len(self.graph[u] ) def __a ( self ): _lowercase : Dict = [] _lowercase : List[Any] = [] _lowercase : str = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : Optional[int] = -2 _lowercase : List[str] = [] _lowercase : Optional[Any] = s _lowercase : List[str] = False _lowercase : Dict = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowercase : Dict = len(_UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowercase : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowercase : Tuple = True if len(_UpperCAmelCase ) != 0: _lowercase : str = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : List[Any] = False indirect_parents.append(_UpperCAmelCase ) _lowercase : int = s _lowercase : str = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return list(_UpperCAmelCase ) def __a ( self ): _lowercase : int = [] _lowercase : str = [] _lowercase : str = list(self.graph )[0] stack.append(_UpperCAmelCase ) visited.append(_UpperCAmelCase ) _lowercase : List[str] = -2 _lowercase : Optional[Any] = [] _lowercase : Optional[int] = s _lowercase : int = False _lowercase : Dict = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowercase : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowercase : Dict = len(_UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowercase : Dict = node[1] break # check if all the children are visited if s == ss: stack.pop() _lowercase : List[str] = True if len(_UpperCAmelCase ) != 0: _lowercase : List[Any] = stack[len(_UpperCAmelCase ) - 1] else: _lowercase : Dict = False indirect_parents.append(_UpperCAmelCase ) _lowercase : Dict = s _lowercase : Any = ss # check if se have reached the starting point if len(_UpperCAmelCase ) == 0: return False def __a ( self ): return list(self.graph ) def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ): _lowercase : Any = time() self.dfs(_UpperCAmelCase , _UpperCAmelCase ) _lowercase : List[Any] = time() return end - begin def __a ( self , _lowerCAmelCase=-2 ): _lowercase : Tuple = time() self.bfs(_UpperCAmelCase ) _lowercase : int = time() return end - begin
714
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Any = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Tuple = normalizer_class(**_lowerCAmelCase ) _lowercase : Union[str, Any] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = [self.sep_token_id] _lowercase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
677
0
import itertools import math def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __magic_name__ ( ) -> int: _lowercase : Tuple = 2 while True: if is_prime(__A ): yield num num += 1 def __magic_name__ ( SCREAMING_SNAKE_CASE = 10_001 ) -> str: return next(itertools.islice(prime_generator() , nth - 1 , __A ) ) if __name__ == "__main__": print(f'''{solution() = }''')
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: return [ord(_lowerCamelCase ) - 96 for elem in plain] def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: return "".join(chr(elem + 96 ) for elem in encoded ) def __magic_name__ ( ) -> List[Any]: _lowercase : List[Any] = encode(input('-> ' ).strip().lower() ) print('Encoded: ' , _lowerCamelCase ) print('Decoded:' , decode(_lowerCamelCase ) ) if __name__ == "__main__": main()
716
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: for attribute in key.split('.' ): _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: _lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: _lowercase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : List[str] = value elif weight_type == "weight_g": _lowercase : Any = value elif weight_type == "weight_v": _lowercase : Tuple = value elif weight_type == "bias": _lowercase : List[str] = value else: _lowercase : Dict = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Dict = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : int = True else: for key, mapped_key in MAPPING.items(): _lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _lowercase : Union[str, Any] = True if "*" in mapped_key: _lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] _lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: _lowercase : Optional[int] = 'weight_g' elif "weight_v" in name: _lowercase : Optional[Any] = 'weight_v' elif "weight" in name: _lowercase : str = 'weight' elif "bias" in name: _lowercase : Any = 'bias' else: _lowercase : str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = full_name.split('conv_layers.' )[-1] _lowercase : Any = name.split('.' ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: _lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertConfig() if is_finetuned: if dict_path: _lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Dict = target_dict.pad_index _lowercase : Dict = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : List[Any] = len(target_dict.symbols ) _lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) _lowercase : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) _lowercase : str = True if config.feat_extract_norm == 'layer' else False _lowercase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) _lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : int = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
677
0
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3 , _lowerCAmelCase=1_0 , _lowerCAmelCase=[8, 1_6, 3_2, 6_4] , _lowerCAmelCase=[1, 1, 2, 1] , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=3 , _lowerCAmelCase=None , _lowerCAmelCase=["stage2", "stage3", "stage4"] , _lowerCAmelCase=[2, 3, 4] , _lowerCAmelCase=1 , ): _lowercase : Optional[int] = parent _lowercase : List[str] = batch_size _lowercase : Tuple = image_size _lowercase : List[str] = num_channels _lowercase : List[str] = embeddings_size _lowercase : List[str] = hidden_sizes _lowercase : str = depths _lowercase : Optional[Any] = is_training _lowercase : int = use_labels _lowercase : Optional[int] = hidden_act _lowercase : List[Any] = num_labels _lowercase : List[str] = scope _lowercase : str = len(__lowerCamelCase ) _lowercase : Optional[int] = out_features _lowercase : str = out_indices _lowercase : Optional[int] = num_groups def __a ( self ): _lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : Optional[int] = None if self.use_labels: _lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) _lowercase : Tuple = self.get_config() return config, pixel_values, labels def __a ( self ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = BitModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowercase : List[Any] = model(__lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = self.num_labels _lowercase : List[str] = BitForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowercase : str = model(__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowercase : Optional[Any] = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _lowercase : Optional[Any] = None _lowercase : Optional[int] = BitBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _lowercase : Any = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() _lowercase : Tuple = config_and_inputs _lowercase : Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _A , _A , unittest.TestCase ): _UpperCamelCase : Tuple = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _UpperCamelCase : int = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Optional[int] = False _UpperCamelCase : int = False _UpperCamelCase : Dict = False _UpperCamelCase : Optional[int] = False def __a ( self ): _lowercase : Any = BitModelTester(self ) _lowercase : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def __a ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self ): return @unittest.skip(reason='Bit does not output attentions' ) def __a ( self ): pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def __a ( self ): pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def __a ( self ): pass def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Dict = model_class(__lowerCamelCase ) _lowercase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Optional[Any] = [*signature.parameters.keys()] _lowercase : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__lowerCamelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[int] = model_class(config=__lowerCamelCase ) for name, module in model.named_modules(): if isinstance(__lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def __a ( self ): def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): _lowercase : List[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) _lowercase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowercase : List[Any] = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Dict = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: _lowercase : Dict = layer_type _lowercase : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def __a ( self ): pass def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def __a ( self ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[Any] = BitModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def __magic_name__ ( ) -> Tuple: _lowercase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def __a ( self ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __a ( self ): _lowercase : Union[str, Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase ) _lowercase : List[Any] = self.default_image_processor _lowercase : List[Any] = prepare_img() _lowercase : Tuple = image_processor(images=__lowerCamelCase , return_tensors='pt' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): _lowercase : Union[str, Any] = model(**__lowerCamelCase ) # verify the logits _lowercase : str = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) _lowercase : Optional[Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) ) @require_torch class lowerCAmelCase_ ( _A , unittest.TestCase ): _UpperCamelCase : List[Any] = (BitBackbone,) if is_torch_available() else () _UpperCamelCase : Optional[Any] = BitConfig _UpperCamelCase : Dict = False def __a ( self ): _lowercase : Union[str, Any] = BitModelTester(self )
717
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ): _lowercase : List[str] = parent _lowercase : Optional[Any] = batch_size _lowercase : str = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_input_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Optional[Any] = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Dict = scope _lowercase : List[Any] = range_bbox def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : List[str] = bbox[i, j, 3] _lowercase : Optional[int] = bbox[i, j, 1] _lowercase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : Dict = bbox[i, j, 2] _lowercase : Dict = bbox[i, j, 0] _lowercase : int = t _lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) _lowercase : Any = None if self.use_input_mask: _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Union[str, Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Any = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase ) _lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = self.num_labels _lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_labels _lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[Any] = config_and_inputs _lowercase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Union[str, Any] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = 10 def __a ( self ): _lowercase : Optional[int] = TFLayoutLMModelTester(self ) _lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __a ( self ): pass def __magic_name__ ( ) -> Optional[int]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the sequence output on [0, :3, :3] _lowercase : Optional[Any] = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head _lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Any = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowercase : List[Any] = outputs.loss _lowercase : Any = (2,) self.assertEqual(loss.shape , _lowerCAmelCase ) # test the shape of the logits _lowercase : str = outputs.logits _lowercase : Dict = (2, 2) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Dict = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) # test the shape of the logits _lowercase : Dict = outputs.logits _lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the shape of the logits _lowercase : Any = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
677
0
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase ) class lowerCAmelCase_ ( _UpperCAmelCase ): _UpperCamelCase : Optional[Any] = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True} ) _UpperCamelCase : List[str] = Features({"text": Value("string" )} ) _UpperCamelCase : Optional[int] = Features({} ) _UpperCamelCase : Tuple = "text" @property def __a ( self ): return {self.text_column: "text"}
718
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : List[str] = logging.get_logger() # the current default level is logging.WARNING _lowercase : Union[str, Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = logging.get_verbosity() _lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : Tuple = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase ) _lowercase : Optional[Any] = logging.log_levels[env_level_str] _lowercase : Dict = logging.get_verbosity() self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level _lowercase : Any = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(_lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def __a ( self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : List[str] = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def __magic_name__ ( ) -> List[str]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
677
0
from collections.abc import Callable import numpy as np def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> np.array: _lowercase : Tuple = int(np.ceil((x_end - xa) / step_size ) ) _lowercase : List[Any] = np.zeros((n + 1,) ) _lowercase : int = ya _lowercase : Optional[int] = xa for k in range(_A ): _lowercase : Tuple = y[k] + step_size * ode_func(_A , y[k] ) _lowercase : str = y[k] + ( (step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
719
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase = "pt" elif is_tf_available(): UpperCamelCase = "tf" else: UpperCamelCase = "jax" class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Dict = PerceiverTokenizer _UpperCamelCase : str = False def __a ( self ): super().setUp() _lowercase : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __a ( self , **_lowerCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _lowercase : Union[str, Any] = [] for i in range(len(_lowerCAmelCase ) ): try: _lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) ) _lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowercase : Any = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowercase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowercase : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: _lowercase : List[Any] = ' ' + output_txt _lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __a ( self ): _lowercase : Dict = self.perceiver_tokenizer _lowercase : Optional[Any] = 'Unicode €.' _lowercase : str = tokenizer(_lowerCAmelCase ) _lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' ) _lowercase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : int = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __a ( self ): _lowercase : List[str] = self.perceiver_tokenizer _lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) if FRAMEWORK != "jax": _lowercase : int = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def __a ( self ): _lowercase : List[Any] = self.perceiver_tokenizer _lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _lowerCAmelCase ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertNotIn('decoder_input_ids' , _lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.perceiver_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : Optional[int] = tokenizer( text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def __a ( self ): # safety check on max_len default value so we are sure the test works _lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowercase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) _lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[str] = tempfile.mkdtemp() _lowercase : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : List[str] = json.load(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(_lowerCAmelCase ) _lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] _lowercase : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[int] = tokenizer_class.from_pretrained( _lowerCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )] _lowercase : Tuple = tokenizer_class.from_pretrained( _lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __a ( self ): _lowercase : str = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
720
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ConditionalDetrFeatureExtractor"] UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu UpperCamelCase = False class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __a ( self ): return 1_2 @property def __a ( self ): return 1_2 @property def __a ( self ): return 3_2 @property def __a ( self ): torch.manual_seed(0 ) _lowercase : List[Any] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def __a ( self ): _lowercase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def __a ( self ): torch.manual_seed(0 ) _lowercase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(_lowercase ) @property def __a ( self ): torch.manual_seed(0 ) _lowercase : Union[str, Any] = 1_2 _lowercase : Tuple = 1_2 _lowercase : Tuple = { """attention_bias""": True, """cross_attention_dim""": 3_2, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 3_2, """sample_size""": width, """activation_fn""": """geglu-approximate""", } _lowercase : Optional[Any] = TransformeraDModel(**_lowercase ) return model def __a ( self ): _lowercase : str = """cpu""" _lowercase : List[str] = self.dummy_vqvae _lowercase : Any = self.dummy_text_encoder _lowercase : Tuple = self.dummy_tokenizer _lowercase : int = self.dummy_transformer _lowercase : int = VQDiffusionScheduler(self.num_embed ) _lowercase : Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase ) _lowercase : Optional[Any] = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) _lowercase : int = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) _lowercase : List[Any] = """teddy bear playing in the pool""" _lowercase : Dict = torch.Generator(device=_lowercase ).manual_seed(0 ) _lowercase : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' ) _lowercase : Optional[int] = output.images _lowercase : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 ) _lowercase : Dict = pipe( [prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0] _lowercase : List[Any] = image[0, -3:, -3:, -1] _lowercase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _lowercase : Dict = np.array([0.65_51, 0.61_68, 0.50_08, 0.56_76, 0.56_59, 0.42_95, 0.60_73, 0.55_99, 0.49_92] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ): _lowercase : int = """cpu""" _lowercase : List[Any] = self.dummy_vqvae _lowercase : Optional[int] = self.dummy_text_encoder _lowercase : List[Any] = self.dummy_tokenizer _lowercase : Union[str, Any] = self.dummy_transformer _lowercase : str = VQDiffusionScheduler(self.num_embed ) _lowercase : List[Any] = LearnedClassifierFreeSamplingEmbeddings( learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) _lowercase : Union[str, Any] = VQDiffusionPipeline( vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , ) _lowercase : Any = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) _lowercase : Tuple = """teddy bear playing in the pool""" _lowercase : str = torch.Generator(device=_lowercase ).manual_seed(0 ) _lowercase : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' ) _lowercase : Dict = output.images _lowercase : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 ) _lowercase : Any = pipe( [prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0] _lowercase : Optional[Any] = image[0, -3:, -3:, -1] _lowercase : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) _lowercase : int = np.array([0.66_93, 0.60_75, 0.49_59, 0.57_01, 0.55_83, 0.43_33, 0.61_71, 0.56_84, 0.49_88] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ): _lowercase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' ) _lowercase : str = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' ) _lowercase : Optional[Any] = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though _lowercase : Any = torch.Generator(device=_lowercase ).manual_seed(0 ) _lowercase : Optional[int] = pipeline( 'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=_lowercase , output_type='np' , ) _lowercase : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
721
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Tuple = "ClapFeatureExtractor" _UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowercase : Any = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowercase : Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __a ( self ): _lowercase : Dict = self.tokenizer.model_input_names _lowercase : Any = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
677
0
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
700
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = num_of_nodes _lowercase : list[list[int]] = [] _lowercase : dict[int, int] = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.m_edges.append([u_node, v_node, weight] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase : Optional[int] = self.find_component(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if component_size[u_node] <= component_size[v_node]: _lowercase : str = v_node component_size[v_node] += component_size[u_node] self.set_component(_lowerCAmelCase ) elif component_size[u_node] >= component_size[v_node]: _lowercase : Any = self.find_component(_lowerCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_lowerCAmelCase ) def __a ( self ): _lowercase : Any = [] _lowercase : Optional[Any] = 0 _lowercase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _lowercase : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase : List[str] = edge _lowercase : Union[str, Any] = self.m_component[u] _lowercase : Union[str, Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : int = edge _lowercase : Optional[int] = self.m_component[u] _lowercase : Optional[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _lowercase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __magic_name__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
677
0
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = SwinConfig(image_size=192 ) if "base" in model_name: _lowercase : str = 6 _lowercase : List[str] = 128 _lowercase : Optional[int] = (2, 2, 18, 2) _lowercase : List[str] = (4, 8, 16, 32) elif "large" in model_name: _lowercase : Dict = 12 _lowercase : List[str] = 192 _lowercase : Any = (2, 2, 18, 2) _lowercase : int = (6, 12, 24, 48) else: raise ValueError('Model not supported, only supports base and large variants' ) _lowercase : str = window_size _lowercase : List[str] = embed_dim _lowercase : List[str] = depths _lowercase : Optional[int] = num_heads return config def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: if "encoder.mask_token" in name: _lowercase : Any = name.replace('encoder.mask_token' , 'embeddings.mask_token' ) if "encoder.patch_embed.proj" in name: _lowercase : Optional[int] = name.replace('encoder.patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "encoder.patch_embed.norm" in name: _lowercase : str = name.replace('encoder.patch_embed.norm' , 'embeddings.norm' ) if "attn.proj" in name: _lowercase : Union[str, Any] = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn' , 'attention.self' ) if "norm1" in name: _lowercase : Optional[Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _lowercase : Optional[int] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : List[Any] = name.replace('mlp.fc2' , 'output.dense' ) if name == "encoder.norm.weight": _lowercase : Optional[int] = 'layernorm.weight' if name == "encoder.norm.bias": _lowercase : Tuple = 'layernorm.bias' if "decoder" in name: pass else: _lowercase : Optional[Any] = 'swin.' + name return name def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: for key in orig_state_dict.copy().keys(): _lowercase : str = orig_state_dict.pop(__SCREAMING_SNAKE_CASE ) if "attn_mask" in key: pass elif "qkv" in key: _lowercase : List[Any] = key.split('.' ) _lowercase : Tuple = int(key_split[2] ) _lowercase : List[str] = int(key_split[4] ) _lowercase : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _lowercase : Optional[int] = val[:dim, :] _lowercase : List[Any] = val[ dim : dim * 2, : ] _lowercase : Any = val[-dim:, :] else: _lowercase : Optional[int] = val[ :dim ] _lowercase : Tuple = val[ dim : dim * 2 ] _lowercase : Any = val[ -dim: ] else: _lowercase : Any = val return orig_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : str = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] _lowercase : str = get_swin_config(__SCREAMING_SNAKE_CASE ) _lowercase : Tuple = SwinForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) model.eval() _lowercase : Optional[int] = convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) _lowercase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowercase : Optional[Any] = ViTImageProcessor(size={'height': 192, 'width': 192} ) _lowercase : Tuple = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) _lowercase : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) with torch.no_grad(): _lowercase : List[Any] = model(**__SCREAMING_SNAKE_CASE ).logits print(outputs.keys() ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="swin-base-simmim-window6-192", type=str, choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"], help="Name of the Swin SimMIM model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth", type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
701
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = {} _lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids'] _lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
677
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase = { "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTBigCodeForSequenceClassification", "GPTBigCodeForTokenClassification", "GPTBigCodeForCausalLM", "GPTBigCodeModel", "GPTBigCodePreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
702
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration} UpperCamelCase = {"facebook/bart-base": BartTokenizer} def __magic_name__ ( ) -> str: _lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' ) _lowercase : Optional[Any] = parser.parse_args() return args def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]: _lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: _lowercase : Dict = 0 _lowercase : Optional[int] = None _lowercase : Union[str, Any] = 0 return huggingface_model, tokenizer def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: model.eval() _lowercase : List[Any] = None _lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): _lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.' _lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device ) _lowercase : str = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=SCREAMING_SNAKE_CASE , ) logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) ) logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = ort_sess.run( SCREAMING_SNAKE_CASE , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(SCREAMING_SNAKE_CASE ), 'max_length': np.array(SCREAMING_SNAKE_CASE ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def __magic_name__ ( ) -> Any: _lowercase : Dict = parse_args() _lowercase : Union[str, Any] = 5 _lowercase : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _lowercase : Optional[Any] = torch.device(args.device ) _lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(SCREAMING_SNAKE_CASE ) if args.max_length: _lowercase : Any = args.max_length if args.num_beams: _lowercase : List[str] = args.num_beams if args.output_file_path: _lowercase : Union[str, Any] = args.output_file_path else: _lowercase : Tuple = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' _lowercase : Tuple = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _lowercase : Any = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' ) _lowercase : str = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' ) _lowercase : Optional[int] = key.replace('heads.cmd.itm_head.cls' , 'itm_head' ) _lowercase : Dict = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' ) _lowercase : Optional[int] = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' ) _lowercase : Optional[int] = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' ) _lowercase : Tuple = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' ) _lowercase : Any = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' ) _lowercase : Optional[Any] = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' ) _lowercase : Any = key.replace('image_encoder.module' , 'flava.image_model' ) _lowercase : Dict = key.replace('text_encoder.module' , 'flava.text_model' ) _lowercase : Union[str, Any] = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' ) _lowercase : Dict = key.replace('mm_encoder.module' , 'flava.multimodal_model' ) _lowercase : Dict = key.replace('text_projection' , 'flava.text_projection' ) _lowercase : Optional[int] = key.replace('image_projection' , 'flava.image_projection' ) _lowercase : Optional[Any] = value.float() for key, value in codebook_state_dict.items(): _lowercase : List[Any] = value return upgrade @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Tuple: '''simple docstring''' if config_path is not None: _lowercase : List[str] = FlavaConfig.from_pretrained(snake_case_ ) else: _lowercase : List[str] = FlavaConfig() _lowercase : Dict = FlavaForPreTraining(snake_case_ ).eval() _lowercase : Optional[int] = convert_dalle_checkpoint(snake_case_ , snake_case_ , save_checkpoint=snake_case_ ) if os.path.exists(snake_case_ ): _lowercase : Any = torch.load(snake_case_ , map_location='cpu' ) else: _lowercase : List[Any] = torch.hub.load_state_dict_from_url(snake_case_ , map_location='cpu' ) _lowercase : Union[str, Any] = upgrade_state_dict(snake_case_ , snake_case_ ) hf_model.load_state_dict(snake_case_ ) _lowercase : Dict = hf_model.state_dict() _lowercase : Union[str, Any] = count_parameters(snake_case_ ) _lowercase : str = count_parameters(snake_case_ ) + count_parameters(snake_case_ ) assert torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
703
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCamelCase : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): _lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): _lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Optional[Any] = parent _lowercase : str = batch_size _lowercase : Optional[int] = seq_length _lowercase : Tuple = is_training _lowercase : List[Any] = use_input_mask _lowercase : Optional[Any] = use_token_type_ids _lowercase : Any = use_labels _lowercase : str = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Tuple = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Tuple = max_position_embeddings _lowercase : List[str] = type_vocab_size _lowercase : Optional[Any] = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : List[str] = num_labels _lowercase : Union[str, Any] = num_choices _lowercase : List[str] = scope _lowercase : Union[str, Any] = embedding_size def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Optional[int] = None if self.use_input_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Dict = None _lowercase : Any = None _lowercase : int = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase ) _lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) _lowercase : Tuple = [input_ids, input_mask] _lowercase : str = model(_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Optional[int] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase ) _lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = self.num_labels _lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase ) _lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_choices _lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : str = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.num_labels _lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : int = config_and_inputs _lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def __a ( self ): _lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self ) _lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase ) @slow def __a ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' ) _lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowercase : List[str] = model(_lowerCAmelCase )[0] _lowercase : str = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , _lowerCAmelCase ) _lowercase : List[Any] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
677
0
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( lowercase__ ): _UpperCamelCase : List[str] = ["input_features", "attention_mask"] def __init__( self , _lowerCAmelCase=8_0 , _lowerCAmelCase=1_6_0_0_0 , _lowerCAmelCase=8_0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ): super().__init__(feature_size=_lowerCAmelCase , sampling_rate=_lowerCAmelCase , padding_value=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : List[Any] = num_mel_bins _lowercase : Dict = do_ceptral_normalize _lowercase : List[str] = normalize_means _lowercase : List[str] = normalize_vars _lowercase : Any = True def __a ( self , _lowerCAmelCase , ): _lowercase : Optional[int] = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers _lowercase : List[Any] = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ) _lowercase : int = ta_kaldi.fbank(_lowerCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def __a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = True , _lowerCAmelCase = 0.0 , ): # make sure we normalize float32 arrays if normalize_means: _lowercase : List[str] = x[:input_length].mean(axis=0 ) _lowercase : Tuple = np.subtract(_lowerCAmelCase , _lowerCAmelCase ) if normalize_vars: _lowercase : Dict = x[:input_length].std(axis=0 ) _lowercase : List[str] = np.divide(_lowerCAmelCase , _lowerCAmelCase ) if input_length < x.shape[0]: _lowercase : Optional[Any] = padding_value # make sure array is in float32 _lowercase : Optional[Any] = x.astype(np.floataa ) return x def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Optional[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(_lowerCAmelCase , _lowerCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(_lowerCAmelCase , _lowerCAmelCase ) ] def __call__( self , _lowerCAmelCase , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) _lowercase : List[str] = isinstance(_lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) _lowercase : Any = is_batched_numpy or ( isinstance(_lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _lowercase : Any = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_lowerCAmelCase , np.ndarray ): _lowercase : Dict = np.asarray(_lowerCAmelCase , dtype=np.floataa ) elif isinstance(_lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _lowercase : List[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _lowercase : Any = [raw_speech] # extract fbank features _lowercase : List[Any] = [self._extract_fbank_features(_lowerCAmelCase ) for waveform in raw_speech] # convert into correct format for padding _lowercase : Union[str, Any] = BatchFeature({'input_features': features} ) _lowercase : Any = self.pad( _lowerCAmelCase , padding=_lowerCAmelCase , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , **_lowerCAmelCase , ) # make sure list is in array format _lowercase : List[Any] = padded_inputs.get('input_features' ) if isinstance(input_features[0] , _lowerCAmelCase ): _lowercase : Union[str, Any] = [np.asarray(_lowerCAmelCase , dtype=np.floataa ) for feature in input_features] _lowercase : List[Any] = padded_inputs.get('attention_mask' ) if attention_mask is not None: _lowercase : List[Any] = [np.asarray(_lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: _lowercase : Optional[Any] = ( np.array(_lowerCAmelCase , dtype=np.intaa ) if self._get_padding_strategies(_lowerCAmelCase , max_length=_lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _lowercase : Any = self.normalize( padded_inputs['input_features'] , attention_mask=_lowerCAmelCase ) if return_tensors is not None: _lowercase : List[str] = padded_inputs.convert_to_tensors(_lowerCAmelCase ) return padded_inputs
704
import qiskit def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts: _lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
677
0
from argparse import ArgumentParser from . import BaseTransformersCLICommand def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class lowerCAmelCase_ ( __SCREAMING_SNAKE_CASE ): @staticmethod def __a ( _lowerCAmelCase ): _lowercase : Any = parser.add_parser('download' ) download_parser.add_argument( '--cache-dir' , type=_a , default=_a , help='Path to location to store the models' ) download_parser.add_argument( '--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' ) download_parser.add_argument( '--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , ) download_parser.add_argument('model' , type=_a , help='Name of the model to download' ) download_parser.set_defaults(func=_a ) def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : int = model _lowercase : Tuple = cache _lowercase : Tuple = force _lowercase : str = trust_remote_code def __a ( self ): from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
705
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict: if attention_mask is None: _lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ): _lowercase : List[str] = parent _lowercase : List[Any] = batch_size _lowercase : Optional[Any] = seq_length _lowercase : Optional[Any] = is_training _lowercase : Tuple = use_labels _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Any = hidden_act _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = eos_token_id _lowercase : int = pad_token_id _lowercase : Tuple = bos_token_id _lowercase : List[Any] = initializer_range def __a ( self ): _lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Tuple = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , ) _lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = 2_0 _lowercase : List[Any] = model_class_name(_lowerCAmelCase ) _lowercase : List[Any] = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : int = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : int = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , ) _lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = 2_0 _lowercase : Any = model_class_name(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : Optional[int] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : List[Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase ) _lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Tuple = 99 def __a ( self ): _lowercase : Dict = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) _lowercase : Union[str, Any] = input_ids.shape[0] _lowercase : Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __a ( self ): _lowercase , _lowercase , _lowercase : int = self._get_config_and_data() _lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase ) _lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) _lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) _lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) _lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) _lowercase : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) _lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ): _UpperCamelCase : int = True _UpperCamelCase : Any = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __a ( self ): _lowercase : List[str] = FlaxBlenderbotSmallModelTester(self ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = model_class(_lowerCAmelCase ) @jax.jit def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ): return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) with self.subTest('JIT Enabled' ): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __a ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : int = model_class(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _lowercase : List[Any] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return model.decode( decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , ) with self.subTest('JIT Enabled' ): _lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __a ( self ): for model_class_name in self.all_model_classes: _lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id _lowercase : int = model(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase )
677
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class lowerCAmelCase_ ( __a ): _UpperCamelCase : List[Any] = 42 class lowerCAmelCase_ ( __a , __a ): @register_to_config def __init__( self , _lowerCAmelCase = 1_6 , _lowerCAmelCase = 8_8 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 3_2 , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = "geglu" , _lowerCAmelCase = True , _lowerCAmelCase = True , ): super().__init__() _lowercase : List[str] = num_attention_heads _lowercase : int = attention_head_dim _lowercase : Optional[int] = num_attention_heads * attention_head_dim _lowercase : Any = in_channels _lowercase : List[str] = torch.nn.GroupNorm(num_groups=a_ , num_channels=a_ , eps=1E-6 , affine=a_ ) _lowercase : Dict = nn.Linear(a_ , a_ ) # 3. Define transformers blocks _lowercase : List[Any] = nn.ModuleList( [ BasicTransformerBlock( a_ , a_ , a_ , dropout=a_ , cross_attention_dim=a_ , activation_fn=a_ , attention_bias=a_ , double_self_attention=a_ , norm_elementwise_affine=a_ , ) for d in range(a_ ) ] ) _lowercase : Tuple = nn.Linear(a_ , a_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=1 , _lowerCAmelCase=None , _lowerCAmelCase = True , ): _lowercase : Any = hidden_states.shape _lowercase : List[str] = batch_frames // num_frames _lowercase : Tuple = hidden_states _lowercase : List[str] = hidden_states[None, :].reshape(a_ , a_ , a_ , a_ , a_ ) _lowercase : Dict = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) _lowercase : Tuple = self.norm(a_ ) _lowercase : Optional[int] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , a_ , a_ ) _lowercase : Any = self.proj_in(a_ ) # 2. Blocks for block in self.transformer_blocks: _lowercase : List[str] = block( a_ , encoder_hidden_states=a_ , timestep=a_ , cross_attention_kwargs=a_ , class_labels=a_ , ) # 3. Output _lowercase : Tuple = self.proj_out(a_ ) _lowercase : List[str] = ( hidden_states[None, None, :] .reshape(a_ , a_ , a_ , a_ , a_ ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) _lowercase : List[Any] = hidden_states.reshape(a_ , a_ , a_ , a_ ) _lowercase : List[str] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=a_ )
706
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Dict = "longformer" def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : Optional[int] = attention_window _lowercase : str = sep_token_id _lowercase : Optional[Any] = bos_token_id _lowercase : List[Any] = eos_token_id _lowercase : Optional[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : List[str] = hidden_act _lowercase : List[str] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : List[str] = onnx_export class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = True @property def __a ( self ): if self.task == "multiple-choice": _lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : int = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def __a ( self ): _lowercase : Optional[int] = super().outputs if self.task == "default": _lowercase : List[str] = {0: 'batch'} return outputs @property def __a ( self ): return 1E-4 @property def __a ( self ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 1_4 ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : int = super().generate_dummy_inputs( preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _lowercase : str = torch.zeros_like(inputs['input_ids'] ) # make every second token global _lowercase : Any = 1 return inputs
677
0
'''simple docstring''' import os def __magic_name__ ( ) -> List[Any]: with open(os.path.dirname(__snake_case ) + '/grid.txt' ) as f: _lowercase : List[str] = [] # noqa: E741 for _ in range(20 ): l.append([int(__snake_case ) for x in f.readline().split()] ) _lowercase : Tuple = 0 # right for i in range(20 ): for j in range(17 ): _lowercase : int = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: _lowercase : str = temp # down for i in range(17 ): for j in range(20 ): _lowercase : Optional[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: _lowercase : Dict = temp # diagonal 1 for i in range(17 ): for j in range(17 ): _lowercase : Optional[int] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: _lowercase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): _lowercase : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: _lowercase : str = temp return maximum if __name__ == "__main__": print(solution())
707
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
677
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : List[str] = ViTImageProcessor if is_vision_available() else None @property def __a ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ): _lowercase : Tuple = (3, 3_2, 1_2_8) _lowercase : Any = tempfile.mkdtemp() # fmt: off _lowercase : str = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowercase : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) _lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) _lowercase : Optional[Any] = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 3_2, """width""": 1_2_8}, } _lowercase : Union[str, Any] = os.path.join(self.tmpdirname , lowercase_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(lowercase_ , lowercase_ ) def __a ( self , **_lowerCAmelCase ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def __a ( self , **_lowerCAmelCase ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ ) def __a ( self ): shutil.rmtree(self.tmpdirname ) def __a ( self ): _lowercase : int = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) _lowercase : Dict = Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) return image_input def __a ( self ): _lowercase : Optional[int] = self.get_tokenizer() _lowercase : Optional[int] = self.get_image_processor() _lowercase : Dict = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor.save_pretrained(self.tmpdirname ) _lowercase : Optional[int] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __a ( self ): _lowercase : Any = self.get_tokenizer() _lowercase : Optional[int] = self.get_image_processor() _lowercase : int = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor.save_pretrained(self.tmpdirname ) _lowercase : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowercase : Optional[int] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) _lowercase : int = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def __a ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Tuple = self.get_tokenizer() _lowercase : Dict = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : Tuple = self.prepare_image_inputs() _lowercase : List[str] = image_processor(lowercase_ , return_tensors='np' ) _lowercase : Optional[int] = processor(images=lowercase_ , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __a ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : Tuple = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : Any = """test""" _lowercase : str = processor(text=lowercase_ ) _lowercase : Union[str, Any] = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __a ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : Union[str, Any] = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : Optional[Any] = """test""" _lowercase : int = self.prepare_image_inputs() _lowercase : Dict = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def __a ( self ): _lowercase : Tuple = self.get_image_processor() _lowercase : Tuple = self.get_tokenizer() _lowercase : Union[str, Any] = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowercase : List[str] = processor.char_decode(lowercase_ ) _lowercase : Optional[int] = tokenizer.batch_decode(lowercase_ ) _lowercase : List[str] = [seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(lowercase_ , lowercase_ ) def __a ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : Any = self.get_tokenizer() _lowercase : Dict = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : int = None _lowercase : Optional[int] = self.prepare_image_inputs() _lowercase : Any = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __a ( self ): _lowercase : Optional[Any] = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : Any = MgpstrProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _lowercase : Optional[int] = torch.randn(1 , 2_7 , 3_8 ) _lowercase : List[str] = torch.randn(1 , 2_7 , 5_0_2_5_7 ) _lowercase : List[Any] = torch.randn(1 , 2_7 , 3_0_5_2_2 ) _lowercase : Dict = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
708
import math def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list: _lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : Dict = i _lowercase : str = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _lowercase : Optional[Any] = array[temp_index - 1] temp_index -= 1 _lowercase : Optional[Any] = temp_index_value return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap _lowercase : List[str] = index _lowercase : List[str] = 2 * index + 1 # Left Node _lowercase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _lowercase : Any = left_index if right_index < heap_size and array[largest] < array[right_index]: _lowercase : str = right_index if largest != index: _lowercase , _lowercase : List[str] = array[largest], array[index] heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: _lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _lowercase , _lowercase : List[Any] = array[0], array[i] heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE ) return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[Any] = low _lowercase : Tuple = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _lowercase , _lowercase : Tuple = array[j], array[i] i += 1 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: if len(SCREAMING_SNAKE_CASE ) == 0: return array _lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) ) _lowercase : str = 16 return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(SCREAMING_SNAKE_CASE ) max_depth -= 1 _lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = p return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input("Enter numbers separated by a comma : ").strip() UpperCamelCase = [float(item) for item in user_input.split(",")] print(sort(unsorted))
677
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ ( a__ ): def __a ( self ): _lowercase : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'embed_dim' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'num_heads' ) ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=6_4 , _lowerCAmelCase=3 , _lowerCAmelCase=[1_6, 4_8, 9_6] , _lowerCAmelCase=[1, 3, 6] , _lowerCAmelCase=[1, 2, 1_0] , _lowerCAmelCase=[7, 3, 3] , _lowerCAmelCase=[4, 2, 2] , _lowerCAmelCase=[2, 1, 1] , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=[False, False, True] , _lowerCAmelCase=[0.0, 0.0, 0.0] , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=2 , ): _lowercase : List[str] = parent _lowercase : Tuple = batch_size _lowercase : str = image_size _lowercase : List[Any] = patch_sizes _lowercase : List[Any] = patch_stride _lowercase : int = patch_padding _lowercase : int = is_training _lowercase : str = use_labels _lowercase : List[str] = num_labels _lowercase : str = num_channels _lowercase : Optional[int] = embed_dim _lowercase : Optional[Any] = num_heads _lowercase : Tuple = stride_kv _lowercase : Union[str, Any] = depth _lowercase : Optional[Any] = cls_token _lowercase : List[Any] = attention_drop_rate _lowercase : List[str] = initializer_range _lowercase : str = layer_norm_eps def __a ( self ): _lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : Any = None if self.use_labels: # create a random int32 tensor of given shape _lowercase : Dict = ids_tensor([self.batch_size] , self.num_labels ) _lowercase : int = self.get_config() return config, pixel_values, labels def __a ( self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = TFCvtModel(config=lowerCAmelCase__ ) _lowercase : Optional[Any] = model(lowerCAmelCase__ , training=lowerCAmelCase__ ) _lowercase : List[str] = (self.image_size, self.image_size) _lowercase : List[str] = image_size[0], image_size[1] for i in range(len(self.depth ) ): _lowercase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _lowercase : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = self.num_labels _lowercase : Any = TFCvtForImageClassification(lowerCAmelCase__ ) _lowercase : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() _lowercase : Optional[int] = config_and_inputs _lowercase : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): _UpperCamelCase : List[str] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () _UpperCamelCase : List[Any] = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) _UpperCamelCase : Optional[Any] = False _UpperCamelCase : List[str] = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : int = False _UpperCamelCase : Dict = False def __a ( self ): _lowercase : Dict = TFCvtModelTester(self ) _lowercase : List[Any] = TFCvtConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=3_7 ) def __a ( self ): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __a ( self ): pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __a ( self ): pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __a ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __a ( self ): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __a ( self ): super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __a ( self ): _lowercase : Optional[int] = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(lowerCAmelCase__ ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __a ( self ): _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : List[str] = model_class(lowerCAmelCase__ ) _lowercase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Dict = [*signature.parameters.keys()] _lowercase : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __a ( self ): def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = model_class(lowerCAmelCase__ ) _lowercase : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _lowercase : Dict = outputs.hidden_states _lowercase : Any = len(self.model_tester.depth ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Dict = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : Optional[int] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __a ( self ): _lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __a ( self ): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Tuple = TFCvtModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__ ( ) -> int: _lowercase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def __a ( self ): return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __a ( self ): _lowercase : Union[str, Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowercase : Any = self.default_image_processor _lowercase : Any = prepare_img() _lowercase : str = image_processor(images=lowerCAmelCase__ , return_tensors='tf' ) # forward pass _lowercase : List[str] = model(**lowerCAmelCase__ ) # verify the logits _lowercase : Dict = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) _lowercase : str = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCAmelCase__ , atol=1E-4 ) )
709
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPFeatureExtractor"] UpperCamelCase = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self , _lowerCAmelCase ): _lowercase : Any = parent def __a ( self ): return {} def __magic_name__ ( ) -> Any: _lowercase : List[Any] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>' _lowercase : int = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n ' return [html_string_a, html_string_a] @require_bsa class lowerCAmelCase_ ( UpperCAmelCase__ , unittest.TestCase ): _UpperCamelCase : int = MarkupLMFeatureExtractor if is_bsa_available() else None def __a ( self ): _lowercase : Optional[int] = MarkupLMFeatureExtractionTester(self ) @property def __a ( self ): return self.feature_extract_tester.prepare_feat_extract_dict() def __a ( self ): _lowercase : Optional[int] = self.feature_extraction_class() # Test not batched input _lowercase : List[Any] = get_html_strings()[0] _lowercase : int = feature_extractor(__lowerCAmelCase ) # fmt: off _lowercase : List[Any] = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] _lowercase : Optional[int] = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] # fmt: on self.assertEqual(encoding.nodes , __lowerCAmelCase ) self.assertEqual(encoding.xpaths , __lowerCAmelCase ) # Test batched _lowercase : List[str] = get_html_strings() _lowercase : Dict = feature_extractor(__lowerCAmelCase ) # fmt: off _lowercase : str = expected_nodes + [['My First Heading', 'My first paragraph.']] _lowercase : Tuple = expected_xpaths + [['/html/body/h1', '/html/body/p']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , __lowerCAmelCase ) self.assertEqual(encoding.xpaths , __lowerCAmelCase )
710
from collections.abc import Sequence def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: _lowercase : Optional[Any] = 0.0 for coeff in reversed(SCREAMING_SNAKE_CASE ): _lowercase : Optional[int] = result * x + coeff return result if __name__ == "__main__": UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCamelCase = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
677
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json", } class lowerCAmelCase_ ( __UpperCAmelCase ): _UpperCamelCase : Any = """switch_transformers""" _UpperCamelCase : Union[str, Any] = ["""past_key_values"""] _UpperCamelCase : Dict = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self , _lowerCAmelCase=3_2_1_2_8 , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=6_4 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=6_4 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3 , _lowerCAmelCase=1_2 , _lowerCAmelCase=8 , _lowerCAmelCase=False , _lowerCAmelCase=0.01 , _lowerCAmelCase="float32" , _lowerCAmelCase=False , _lowerCAmelCase=3_2 , _lowerCAmelCase=1_2_8 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-6 , _lowerCAmelCase=0.0_01 , _lowerCAmelCase=0.0_01 , _lowerCAmelCase=1.0 , _lowerCAmelCase="relu" , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=0 , _lowerCAmelCase=1 , **_lowerCAmelCase , ): _lowercase : List[Any] = vocab_size _lowercase : Any = d_model _lowercase : Any = d_kv _lowercase : Dict = d_ff _lowercase : List[Any] = num_sparse_encoder_layers _lowercase : Optional[int] = num_layers _lowercase : Dict = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _lowercase : Union[str, Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: _lowercase : Optional[Any] = self.num_layers // self.num_sparse_encoder_layers else: _lowercase : Tuple = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: _lowercase : List[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: _lowercase : Tuple = self.num_decoder_layers # HACK: this will create 0 sparse layers _lowercase : List[Any] = num_heads _lowercase : Optional[Any] = num_experts _lowercase : Dict = expert_capacity _lowercase : Optional[int] = router_bias _lowercase : Optional[int] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) _lowercase : Optional[Any] = router_dtype _lowercase : Optional[int] = router_ignore_padding_tokens _lowercase : Any = relative_attention_num_buckets _lowercase : str = relative_attention_max_distance _lowercase : str = dropout_rate _lowercase : List[str] = layer_norm_epsilon _lowercase : Union[str, Any] = initializer_factor _lowercase : Optional[Any] = feed_forward_proj _lowercase : str = use_cache _lowercase : int = add_router_probs _lowercase : Tuple = router_z_loss_coef _lowercase : Dict = router_aux_loss_coef _lowercase : Optional[Any] = self.feed_forward_proj.split('-' ) _lowercase : List[Any] = act_info[-1] _lowercase : int = act_info[0] == 'gated' if len(UpperCAmelCase_ ) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_ ) > 2: raise ValueError( F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _lowercase : Dict = 'gelu_new' super().__init__( pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , **UpperCAmelCase_ , )
711
from __future__ import annotations class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase=None ): _lowercase : int = data _lowercase : Union[str, Any] = None def __repr__( self ): _lowercase : Dict = [] _lowercase : Tuple = self while temp: string_rep.append(F"""{temp.data}""" ) _lowercase : Optional[Any] = temp.next return "->".join(_lowerCAmelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: if not elements_list: raise Exception('The Elements List is empty' ) _lowercase : Union[str, Any] = Node(elements_list[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): _lowercase : Optional[int] = Node(elements_list[i] ) _lowercase : List[Any] = current.next return head def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None: if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __magic_name__ ( ) -> List[str]: from doctest import testmod testmod() _lowercase : int = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: if n == 1 or not isinstance(_lowercase , _lowercase ): return 0 elif n == 2: return 1 else: _lowercase : Tuple = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : List[str] = 0 _lowercase : Optional[int] = 2 while digits < n: index += 1 _lowercase : Optional[Any] = len(str(fibonacci(_lowercase ) ) ) return index def __magic_name__ ( SCREAMING_SNAKE_CASE = 1_000 ) -> Optional[Any]: return fibonacci_digits_index(_lowercase ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
712
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2) if __name__ == "__main__": def __magic_name__ ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
677
0
import random def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: _lowercase : Tuple = num - 1 _lowercase : Tuple = 0 while s % 2 == 0: _lowercase : Tuple = s // 2 t += 1 for _ in range(5 ): _lowercase : Dict = random.randrange(2 , num - 1 ) _lowercase : List[Any] = pow(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if v != 1: _lowercase : List[str] = 0 while v != (num - 1): if i == t - 1: return False else: _lowercase : Tuple = i + 1 _lowercase : Any = (v**2) % num return True def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: if num < 2: return False _lowercase : Union[str, Any] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(__UpperCamelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE = 1_024 ) -> int: while True: _lowercase : Optional[Any] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(__UpperCamelCase ): return num if __name__ == "__main__": UpperCamelCase = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num)))
713
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' from __future__ import annotations from math import gcd def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 2 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 3 , ) -> Union[str, Any]: # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: return (pow(SCREAMING_SNAKE_CASE , 2 ) + step) % modulus for _ in range(SCREAMING_SNAKE_CASE ): # These track the position within the cycle detection logic. _lowercase : Any = seed _lowercase : Dict = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. _lowercase : Optional[Any] = rand_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : Any = rand_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : Optional[Any] = rand_fn(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. _lowercase : Optional[Any] = gcd(hare - tortoise , SCREAMING_SNAKE_CASE ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. _lowercase : Dict = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. return None if __name__ == "__main__": import argparse UpperCamelCase = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) UpperCamelCase = parser.parse_args() UpperCamelCase = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: UpperCamelCase = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
714
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Any = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Tuple = normalizer_class(**_lowerCAmelCase ) _lowercase : Union[str, Any] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = [self.sep_token_id] _lowercase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
677
0
import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( lowercase__ ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): warnings.warn( 'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use SegformerImageProcessor instead.' , __lowercase , ) super().__init__(*__lowercase , **__lowercase )
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : Tuple = tempfile.mkdtemp() # fmt: off _lowercase : List[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on _lowercase : List[Any] = dict(zip(A_ , range(len(A_ ) ) ) ) _lowercase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] _lowercase : Any = {"unk_token": "<unk>"} _lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowercase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) _lowercase : Dict = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } _lowercase : str = os.path.join(self.tmpdirname , A_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(A_ , A_ ) def __a ( self , **_lowerCAmelCase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ ) def __a ( self , **_lowerCAmelCase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ ) def __a ( self , **_lowerCAmelCase ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ ) def __a ( self ): shutil.rmtree(self.tmpdirname ) def __a ( self ): _lowercase : List[str] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _lowercase : str = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self ): _lowercase : Dict = self.get_tokenizer() _lowercase : Dict = self.get_rust_tokenizer() _lowercase : List[Any] = self.get_image_processor() _lowercase : Any = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) processor_slow.save_pretrained(self.tmpdirname ) _lowercase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ ) _lowercase : str = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) processor_fast.save_pretrained(self.tmpdirname ) _lowercase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , A_ ) self.assertIsInstance(processor_fast.tokenizer , A_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , A_ ) self.assertIsInstance(processor_fast.image_processor , A_ ) def __a ( self ): _lowercase : Optional[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _lowercase : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowercase : Tuple = self.get_image_processor(do_normalize=A_ , padding_value=1.0 ) _lowercase : List[str] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A_ ) def __a ( self ): _lowercase : Any = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) _lowercase : Union[str, Any] = self.prepare_image_inputs() _lowercase : Optional[int] = image_processor(A_ , return_tensors='np' ) _lowercase : str = processor(images=A_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __a ( self ): _lowercase : Union[str, Any] = self.get_image_processor() _lowercase : List[str] = self.get_tokenizer() _lowercase : Tuple = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) _lowercase : Tuple = "lower newer" _lowercase : List[str] = processor(text=A_ ) _lowercase : Optional[Any] = tokenizer(A_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __a ( self ): _lowercase : str = self.get_image_processor() _lowercase : Tuple = self.get_tokenizer() _lowercase : Union[str, Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) _lowercase : int = "lower newer" _lowercase : Any = self.prepare_image_inputs() _lowercase : Optional[int] = processor(text=A_ , images=A_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __a ( self ): _lowercase : List[str] = self.get_image_processor() _lowercase : str = self.get_tokenizer() _lowercase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) _lowercase : Optional[Any] = self.prepare_image_inputs() _lowercase : Optional[Any] = self.prepare_image_inputs() _lowercase : Optional[int] = processor(images=A_ , visual_prompt=A_ ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(A_ ): processor() def __a ( self ): _lowercase : str = self.get_image_processor() _lowercase : List[Any] = self.get_tokenizer() _lowercase : List[Any] = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ ) _lowercase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase : Optional[int] = processor.batch_decode(A_ ) _lowercase : Optional[int] = tokenizer.batch_decode(A_ ) self.assertListEqual(A_ , A_ )
716
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: for attribute in key.split('.' ): _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: _lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: _lowercase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : List[str] = value elif weight_type == "weight_g": _lowercase : Any = value elif weight_type == "weight_v": _lowercase : Tuple = value elif weight_type == "bias": _lowercase : List[str] = value else: _lowercase : Dict = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Dict = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : int = True else: for key, mapped_key in MAPPING.items(): _lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _lowercase : Union[str, Any] = True if "*" in mapped_key: _lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] _lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: _lowercase : Optional[int] = 'weight_g' elif "weight_v" in name: _lowercase : Optional[Any] = 'weight_v' elif "weight" in name: _lowercase : str = 'weight' elif "bias" in name: _lowercase : Any = 'bias' else: _lowercase : str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = full_name.split('conv_layers.' )[-1] _lowercase : Any = name.split('.' ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: _lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertConfig() if is_finetuned: if dict_path: _lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Dict = target_dict.pad_index _lowercase : Dict = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : List[Any] = len(target_dict.symbols ) _lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) _lowercase : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) _lowercase : str = True if config.feat_extract_norm == 'layer' else False _lowercase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) _lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : int = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
677
0
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel UpperCamelCase = { '''gwf-440k''': { '''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 65_536, }, '''jmann-small-190k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 65_536, }, '''jmann-large-580k''': { '''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''', '''sample_rate''': 48_000, '''sample_size''': 131_072, }, '''maestro-uncond-150k''': { '''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, '''unlocked-uncond-250k''': { '''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, '''honk-140k''': { '''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''', '''sample_rate''': 16_000, '''sample_size''': 65_536, }, } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: return torch.atana(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / math.pi * 2 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Dict = torch.sin(t * math.pi / 2 ) ** 2 _lowercase : Union[str, Any] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) class lowerCAmelCase_ ( UpperCamelCase__ ): pass class lowerCAmelCase_ ( nn.Module ): def __init__( self , _lowerCAmelCase ): super().__init__() _lowercase : Union[str, Any] = DiffusionAttnUnetaD(_lowerCAmelCase , n_attn_layers=4 ) _lowercase : Union[str, Any] = deepcopy(self.diffusion ) _lowercase : Optional[Any] = torch.quasirandom.SobolEngine(1 , scramble=_lowerCAmelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = MODELS_MAP[model_name]['url'] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" UpperCamelCase = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', } UpperCamelCase = { '''8''': '''resnets.0''', '''9''': '''attentions.0''', '''10''': '''resnets.1''', '''11''': '''attentions.1''', '''12''': '''resnets.2''', '''13''': '''attentions.2''', } UpperCamelCase = { '''1''': '''resnets.0''', '''2''': '''attentions.0''', '''3''': '''resnets.1''', '''4''': '''attentions.1''', '''5''': '''resnets.2''', '''6''': '''attentions.2''', '''8''': '''resnets.3''', '''9''': '''attentions.3''', '''10''': '''resnets.4''', '''11''': '''attentions.4''', '''12''': '''resnets.5''', '''13''': '''attentions.5''', } UpperCamelCase = { '''0''': '''resnets.0''', '''1''': '''resnets.1''', '''2''': '''resnets.2''', '''4''': '''resnets.0''', '''5''': '''resnets.1''', '''6''': '''resnets.2''', } UpperCamelCase = { '''skip''': '''conv_skip''', '''main.0''': '''conv_1''', '''main.1''': '''group_norm_1''', '''main.3''': '''conv_2''', '''main.4''': '''group_norm_2''', } UpperCamelCase = { '''norm''': '''group_norm''', '''qkv_proj''': ['''query''', '''key''', '''value'''], '''out_proj''': ['''proj_attn'''], } def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' ): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: for key, value in ATTN_MAP.items(): if name.startswith(SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return name.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif name.startswith(SCREAMING_SNAKE_CASE ): return [name.replace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 ) -> Optional[Any]: _lowercase : List[str] = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _lowercase : Any = 0 if string.startswith('net.3.' ): depth += 1 _lowercase : str = string[6:] elif string.startswith('net.' ): _lowercase : Optional[Any] = string[4:] while string.startswith('main.7.' ): depth += 1 _lowercase : Any = string[7:] if string.startswith('main.' ): _lowercase : Optional[int] = string[5:] # mid block if string[:2].isdigit(): _lowercase : Any = string[:2] _lowercase : Dict = string[2:] else: _lowercase : Union[str, Any] = string[0] _lowercase : Tuple = string[1:] if depth == max_depth: _lowercase : Any = MID_NUM_TO_LAYER[layer_num] _lowercase : List[str] = 'mid_block' elif depth > 0 and int(SCREAMING_SNAKE_CASE ) < 7: _lowercase : str = DOWN_NUM_TO_LAYER[layer_num] _lowercase : Dict = F"""down_blocks.{depth}""" elif depth > 0 and int(SCREAMING_SNAKE_CASE ) > 7: _lowercase : str = UP_NUM_TO_LAYER[layer_num] _lowercase : str = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: _lowercase : Optional[Any] = DEPTH_0_TO_LAYER[layer_num] _lowercase : str = F"""up_blocks.{max_depth - 1}""" if int(SCREAMING_SNAKE_CASE ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) _lowercase : str = string_left[1:] if "resnets" in new_layer: _lowercase : Optional[int] = convert_resconv_naming(SCREAMING_SNAKE_CASE ) elif "attentions" in new_layer: _lowercase : List[str] = convert_attn_naming(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = new_string_left if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : int = prefix + '.' + new_layer + '.' + string_left else: _lowercase : Optional[int] = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Union[str, Any] = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _lowercase : List[Any] = rename(SCREAMING_SNAKE_CASE ) # check if we need to transform from Conv => Linear for attention if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : List[Any] = transform_conv_attns(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: _lowercase : int = v return new_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: if len(SCREAMING_SNAKE_CASE ) == 1: if len(v.shape ) == 3: # weight _lowercase : int = v[:, :, 0] else: # bias _lowercase : Tuple = v else: # qkv matrices _lowercase : str = v.shape[0] _lowercase : Dict = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _lowercase : List[str] = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _lowercase : List[Any] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _lowercase : Optional[Any] = args.model_path.split('/' )[-1].split('.' )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" _lowercase : List[str] = download(SCREAMING_SNAKE_CASE ) _lowercase : str = MODELS_MAP[model_name]['sample_rate'] _lowercase : Union[str, Any] = MODELS_MAP[model_name]['sample_size'] _lowercase : Optional[Any] = Object() _lowercase : List[str] = sample_size _lowercase : List[Any] = sample_rate _lowercase : List[Any] = 0 _lowercase : Tuple = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE , sample_rate=SCREAMING_SNAKE_CASE ) _lowercase : Dict = diffusers_model.state_dict() _lowercase : str = DiffusionUncond(SCREAMING_SNAKE_CASE ) orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE )['state_dict'] ) _lowercase : Union[str, Any] = orig_model.diffusion_ema.eval() _lowercase : List[Any] = orig_model.state_dict() _lowercase : Optional[int] = rename_orig_weights(SCREAMING_SNAKE_CASE ) _lowercase : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _lowercase : List[Any] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(SCREAMING_SNAKE_CASE ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(SCREAMING_SNAKE_CASE ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": _lowercase : str = value.squeeze() _lowercase : str = value diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE ) _lowercase : Optional[int] = 100 _lowercase : str = 33 _lowercase : Union[str, Any] = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE ) _lowercase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE ) _lowercase : Optional[Any] = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : Optional[int] = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE )[:-1] _lowercase : int = get_crash_schedule(SCREAMING_SNAKE_CASE ) _lowercase : List[str] = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = torch.manual_seed(33 ) _lowercase : Optional[Any] = pipe(num_inference_steps=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).audios _lowercase : List[str] = sampling.iplms_sample(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , {} ) _lowercase : Optional[int] = generated.clamp(-1 , 1 ) _lowercase : Optional[Any] = (generated - audio).abs().sum() _lowercase : Optional[int] = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , SCREAMING_SNAKE_CASE ) print('Diff max' , SCREAMING_SNAKE_CASE ) assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase = parser.parse_args() main(args)
717
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ): _lowercase : List[str] = parent _lowercase : Optional[Any] = batch_size _lowercase : str = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_input_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Optional[Any] = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Dict = scope _lowercase : List[Any] = range_bbox def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : List[str] = bbox[i, j, 3] _lowercase : Optional[int] = bbox[i, j, 1] _lowercase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : Dict = bbox[i, j, 2] _lowercase : Dict = bbox[i, j, 0] _lowercase : int = t _lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) _lowercase : Any = None if self.use_input_mask: _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Union[str, Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Any = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase ) _lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = self.num_labels _lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_labels _lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[Any] = config_and_inputs _lowercase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Union[str, Any] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = 10 def __a ( self ): _lowercase : Optional[int] = TFLayoutLMModelTester(self ) _lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __a ( self ): pass def __magic_name__ ( ) -> Optional[int]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the sequence output on [0, :3, :3] _lowercase : Optional[Any] = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head _lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Any = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowercase : List[Any] = outputs.loss _lowercase : Any = (2,) self.assertEqual(loss.shape , _lowerCAmelCase ) # test the shape of the logits _lowercase : str = outputs.logits _lowercase : Dict = (2, 2) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Dict = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) # test the shape of the logits _lowercase : Dict = outputs.logits _lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the shape of the logits _lowercase : Any = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
677
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self , _lowerCAmelCase ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): _lowercase : List[str] = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(lowerCamelCase__ ) def __a ( self ): _lowercase : Union[str, Any] = "sshleifer/tiny-gpt2" _lowercase : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Union[str, Any] = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : List[str] = "sgugger/tiny-distilbert-classification" _lowercase : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , only_pretrain_model=lowerCamelCase__ , ) _lowercase : str = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : Dict = "sshleifer/tiny-gpt2" _lowercase : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , torchscript=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Optional[Any] = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def __a ( self ): _lowercase : str = "sshleifer/tiny-gpt2" _lowercase : Dict = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , fpaa=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Optional[int] = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : str = "sshleifer/tiny-gpt2" _lowercase : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase__ ) # set architectures equal to `None` _lowercase : List[Any] = None _lowercase : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : str = PyTorchBenchmark(lowerCamelCase__ , configs=[config] ) _lowercase : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : Union[str, Any] = "sshleifer/tiny-gpt2" _lowercase : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : List[str] = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def __a ( self ): _lowercase : Optional[int] = "sshleifer/tiny-gpt2" _lowercase : Union[str, Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) _lowercase : str = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ): _lowercase : Union[str, Any] = "sshleifer/tiny-gpt2" _lowercase : Any = AutoConfig.from_pretrained(lowerCamelCase__ ) _lowercase : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Tuple = PyTorchBenchmark(lowerCamelCase__ , configs=[config] ) _lowercase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : Union[str, Any] = "sshleifer/tinier_bart" _lowercase : List[str] = AutoConfig.from_pretrained(lowerCamelCase__ ) _lowercase : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Optional[int] = PyTorchBenchmark(lowerCamelCase__ , configs=[config] ) _lowercase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ): _lowercase : Any = "sshleifer/tiny-gpt2" _lowercase : Dict = AutoConfig.from_pretrained(lowerCamelCase__ ) _lowercase : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : Dict = PyTorchBenchmark(lowerCamelCase__ , configs=[config] ) _lowercase : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ): _lowercase : List[str] = "sshleifer/tinier_bart" _lowercase : List[str] = AutoConfig.from_pretrained(lowerCamelCase__ ) _lowercase : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase__ , ) _lowercase : List[Any] = PyTorchBenchmark(lowerCamelCase__ , configs=[config] ) _lowercase : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ): _lowercase : List[str] = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : Optional[int] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , save_to_csv=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase__ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowerCamelCase__ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowerCamelCase__ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowerCamelCase__ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowerCamelCase__ , 'env.csv' ) , multi_process=lowerCamelCase__ , ) _lowercase : List[str] = PyTorchBenchmark(lowerCamelCase__ ) benchmark.run() self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'env.csv' ) ).exists() ) def __a ( self ): _lowercase : Tuple = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(_lowerCAmelCase ): self.assertTrue(hasattr(lowerCamelCase__ , 'sequential' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'cumulative' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'current' ) ) self.assertTrue(hasattr(lowerCamelCase__ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=lowerCamelCase__ , inference=lowerCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase__ , 'log.txt' ) , log_print=lowerCamelCase__ , trace_memory_line_by_line=lowerCamelCase__ , multi_process=lowerCamelCase__ , ) _lowercase : Optional[int] = PyTorchBenchmark(lowerCamelCase__ ) _lowercase : Union[str, Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowerCamelCase__ , 'log.txt' ) ).exists() )
718
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : List[str] = logging.get_logger() # the current default level is logging.WARNING _lowercase : Union[str, Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = logging.get_verbosity() _lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : Tuple = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase ) _lowercase : Optional[Any] = logging.log_levels[env_level_str] _lowercase : Dict = logging.get_verbosity() self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level _lowercase : Any = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(_lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def __a ( self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : List[str] = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def __magic_name__ ( ) -> List[str]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) _lowercase : Tuple = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowercase__ ) ) return round(lowercase__ , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
719
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase = "pt" elif is_tf_available(): UpperCamelCase = "tf" else: UpperCamelCase = "jax" class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Dict = PerceiverTokenizer _UpperCamelCase : str = False def __a ( self ): super().setUp() _lowercase : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __a ( self , **_lowerCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _lowercase : Union[str, Any] = [] for i in range(len(_lowerCAmelCase ) ): try: _lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) ) _lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowercase : Any = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowercase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowercase : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: _lowercase : List[Any] = ' ' + output_txt _lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __a ( self ): _lowercase : Dict = self.perceiver_tokenizer _lowercase : Optional[Any] = 'Unicode €.' _lowercase : str = tokenizer(_lowerCAmelCase ) _lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' ) _lowercase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : int = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __a ( self ): _lowercase : List[str] = self.perceiver_tokenizer _lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) if FRAMEWORK != "jax": _lowercase : int = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def __a ( self ): _lowercase : List[Any] = self.perceiver_tokenizer _lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _lowerCAmelCase ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertNotIn('decoder_input_ids' , _lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.perceiver_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : Optional[int] = tokenizer( text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def __a ( self ): # safety check on max_len default value so we are sure the test works _lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowercase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) _lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[str] = tempfile.mkdtemp() _lowercase : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : List[str] = json.load(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(_lowerCAmelCase ) _lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] _lowercase : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[int] = tokenizer_class.from_pretrained( _lowerCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )] _lowercase : Tuple = tokenizer_class.from_pretrained( _lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __a ( self ): _lowercase : str = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: _lowercase : Union[str, Any] = """""" for i in table: res += inp[i - 1] return res def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return data[1:] + data[0] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Union[str, Any] = """""" for i in range(len(UpperCamelCase__ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : Union[str, Any] = int('0b' + data[0] + data[-1] , 2 ) _lowercase : Optional[int] = int('0b' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : int = message[:4] _lowercase : int = message[4:] _lowercase : List[str] = apply_table(UpperCamelCase__ , UpperCamelCase__ ) _lowercase : Tuple = xor(UpperCamelCase__ , UpperCamelCase__ ) _lowercase : int = apply_sbox(UpperCamelCase__ , temp[:4] ) # noqa: E741 _lowercase : List[str] = apply_sbox(UpperCamelCase__ , temp[4:] ) _lowercase : str = """0""" * (2 - len(UpperCamelCase__ )) + l # noqa: E741 _lowercase : List[Any] = """0""" * (2 - len(UpperCamelCase__ )) + r _lowercase : Optional[Any] = apply_table(l + r , UpperCamelCase__ ) _lowercase : Dict = xor(UpperCamelCase__ , UpperCamelCase__ ) return temp + right if __name__ == "__main__": UpperCamelCase = input("Enter 10 bit key: ") UpperCamelCase = input("Enter 8 bit message: ") UpperCamelCase = [6, 3, 7, 4, 8, 5, 10, 9] UpperCamelCase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] UpperCamelCase = [2, 4, 3, 1] UpperCamelCase = [2, 6, 3, 1, 4, 8, 5, 7] UpperCamelCase = [4, 1, 3, 5, 7, 2, 8, 6] UpperCamelCase = [4, 1, 2, 3, 2, 3, 4, 1] UpperCamelCase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCamelCase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCamelCase = apply_table(key, paa_table) UpperCamelCase = temp[:5] UpperCamelCase = temp[5:] UpperCamelCase = left_shift(left) UpperCamelCase = left_shift(right) UpperCamelCase = apply_table(left + right, pa_table) UpperCamelCase = left_shift(left) UpperCamelCase = left_shift(right) UpperCamelCase = left_shift(left) UpperCamelCase = left_shift(right) UpperCamelCase = apply_table(left + right, pa_table) # encryption UpperCamelCase = apply_table(message, IP) UpperCamelCase = function(expansion, sa, sa, keya, temp) UpperCamelCase = temp[4:] + temp[:4] UpperCamelCase = function(expansion, sa, sa, keya, temp) UpperCamelCase = apply_table(temp, IP_inv) print("Cipher text is:", CT) # decryption UpperCamelCase = apply_table(CT, IP) UpperCamelCase = function(expansion, sa, sa, keya, temp) UpperCamelCase = temp[4:] + temp[:4] UpperCamelCase = function(expansion, sa, sa, keya, temp) UpperCamelCase = apply_table(temp, IP_inv) print("Plain text after decypting is:", PT)
720
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ConditionalDetrFeatureExtractor"] UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
721
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Tuple = "ClapFeatureExtractor" _UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowercase : Any = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowercase : Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __a ( self ): _lowercase : Dict = self.tokenizer.model_input_names _lowercase : Any = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
677
0
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Any: try: _lowercase : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _lowercase : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: _lowercase : Union[str, Any] = strtobool(SCREAMING_SNAKE_CASE ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value UpperCamelCase = parse_flag_from_env("RUN_SLOW", default=False) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return unittest.skip('Test was skipped' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: return unittest.skipUnless(_run_slow_tests , 'test is slow' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: if test_case is None: return partial(SCREAMING_SNAKE_CASE , version=SCREAMING_SNAKE_CASE ) return unittest.skipUnless(is_torch_version('>=' , SCREAMING_SNAKE_CASE ) , F"""test requires torch version >= {version}""" )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(SCREAMING_SNAKE_CASE ) UpperCamelCase = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(SCREAMING_SNAKE_CASE ) class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Union[str, Any] = True @classmethod def __a ( cls ): _lowercase : int = tempfile.mkdtemp() @classmethod def __a ( cls ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __a ( self ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A__ ) class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self , _lowerCAmelCase ): _lowercase : Any = mocks if isinstance(A__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: _lowercase : List[str] = AcceleratorState() _lowercase : Optional[Any] = tensor[None].clone().to(state.device ) _lowercase : Optional[Any] = gather(SCREAMING_SNAKE_CASE ).cpu() _lowercase : Tuple = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , SCREAMING_SNAKE_CASE ): return False return True class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = returncode _lowercase : Optional[Any] = stdout _lowercase : int = stderr async def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: while True: _lowercase : List[str] = await stream.readline() if line: callback(SCREAMING_SNAKE_CASE ) else: break async def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> _RunOutput: if echo: print('\nRunning: ' , ' '.join(SCREAMING_SNAKE_CASE ) ) _lowercase : List[str] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _lowercase : Tuple = [] _lowercase : Any = [] def tee(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" ): _lowercase : Dict = line.decode('utf-8' ).rstrip() sink.append(SCREAMING_SNAKE_CASE ) if not quiet: print(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , file=SCREAMING_SNAKE_CASE ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE : tee(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE : tee(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ) ), ] , timeout=SCREAMING_SNAKE_CASE , ) return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=180 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True ) -> _RunOutput: _lowercase : Dict = asyncio.get_event_loop() _lowercase : int = loop.run_until_complete( _stream_subprocess(SCREAMING_SNAKE_CASE , env=SCREAMING_SNAKE_CASE , stdin=SCREAMING_SNAKE_CASE , timeout=SCREAMING_SNAKE_CASE , quiet=SCREAMING_SNAKE_CASE , echo=SCREAMING_SNAKE_CASE ) ) _lowercase : List[str] = ' '.join(SCREAMING_SNAKE_CASE ) if result.returncode > 0: _lowercase : int = '\n'.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) return result class lowerCAmelCase_ ( __a ): pass def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: try: _lowercase : Dict = subprocess.check_output(SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(SCREAMING_SNAKE_CASE , 'decode' ): _lowercase : Optional[Any] = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F"""Command `{' '.join(SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
700
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = num_of_nodes _lowercase : list[list[int]] = [] _lowercase : dict[int, int] = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.m_edges.append([u_node, v_node, weight] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase : Optional[int] = self.find_component(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if component_size[u_node] <= component_size[v_node]: _lowercase : str = v_node component_size[v_node] += component_size[u_node] self.set_component(_lowerCAmelCase ) elif component_size[u_node] >= component_size[v_node]: _lowercase : Any = self.find_component(_lowerCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_lowerCAmelCase ) def __a ( self ): _lowercase : Any = [] _lowercase : Optional[Any] = 0 _lowercase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _lowercase : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase : List[str] = edge _lowercase : Union[str, Any] = self.m_component[u] _lowercase : Union[str, Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : int = edge _lowercase : Optional[int] = self.m_component[u] _lowercase : Optional[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _lowercase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __magic_name__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
677
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : List[Any] = 'longformer' def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : int = attention_window _lowercase : List[str] = sep_token_id _lowercase : Tuple = bos_token_id _lowercase : Dict = eos_token_id _lowercase : List[str] = vocab_size _lowercase : str = hidden_size _lowercase : Dict = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Union[str, Any] = hidden_act _lowercase : Any = intermediate_size _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Any = attention_probs_dropout_prob _lowercase : List[Any] = max_position_embeddings _lowercase : Union[str, Any] = type_vocab_size _lowercase : List[Any] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : Optional[Any] = onnx_export class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[Any] = True @property def __a ( self ): if self.task == "multiple-choice": _lowercase : int = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def __a ( self ): _lowercase : Union[str, Any] = super().outputs if self.task == "default": _lowercase : Optional[Any] = {0: 'batch'} return outputs @property def __a ( self ): return 1E-4 @property def __a ( self ): return max(super().default_onnx_opset , 1_4 ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : Union[str, Any] = super().generate_dummy_inputs( preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _lowercase : Any = torch.zeros_like(inputs['input_ids'] ) # make every second token global _lowercase : Any = 1 return inputs
701
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = {} _lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids'] _lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
677
0
import os def __magic_name__ ( ) -> Tuple: with open(os.path.dirname(lowercase__ ) + '/p022_names.txt' ) as file: _lowercase : List[Any] = str(file.readlines()[0] ) _lowercase : List[Any] = names.replace('"' , '' ).split(',' ) names.sort() _lowercase : Any = 0 _lowercase : int = 0 for i, name in enumerate(lowercase__ ): for letter in name: name_score += ord(lowercase__ ) - 64 total_score += (i + 1) * name_score _lowercase : Any = 0 return total_score if __name__ == "__main__": print(solution())
702
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration} UpperCamelCase = {"facebook/bart-base": BartTokenizer} def __magic_name__ ( ) -> str: _lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' ) _lowercase : Optional[Any] = parser.parse_args() return args def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]: _lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: _lowercase : Dict = 0 _lowercase : Optional[int] = None _lowercase : Union[str, Any] = 0 return huggingface_model, tokenizer def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: model.eval() _lowercase : List[Any] = None _lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): _lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.' _lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device ) _lowercase : str = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=SCREAMING_SNAKE_CASE , ) logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) ) logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = ort_sess.run( SCREAMING_SNAKE_CASE , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(SCREAMING_SNAKE_CASE ), 'max_length': np.array(SCREAMING_SNAKE_CASE ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def __magic_name__ ( ) -> Any: _lowercase : Dict = parse_args() _lowercase : Union[str, Any] = 5 _lowercase : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _lowercase : Optional[Any] = torch.device(args.device ) _lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(SCREAMING_SNAKE_CASE ) if args.max_length: _lowercase : Any = args.max_length if args.num_beams: _lowercase : List[str] = args.num_beams if args.output_file_path: _lowercase : Union[str, Any] = args.output_file_path else: _lowercase : Tuple = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
from __future__ import annotations from fractions import Fraction def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' _lowercase : int = [] _lowercase : Tuple = 11 _lowercase : Optional[int] = int('1' + '0' * digit_len ) for num in range(lowerCAmelCase_ , lowerCAmelCase_ ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(lowerCAmelCase_ , lowerCAmelCase_ ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 _lowercase : int = 10 return solutions def __magic_name__ ( SCREAMING_SNAKE_CASE = 2 ) -> Dict: '''simple docstring''' _lowercase : Optional[int] = 1.0 for fraction in fraction_list(lowerCAmelCase_ ): _lowercase : Optional[Any] = Fraction(lowerCAmelCase_ ) result *= frac.denominator / frac.numerator return int(lowerCAmelCase_ ) if __name__ == "__main__": print(solution())
703
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCamelCase : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): _lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): _lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Optional[Any] = parent _lowercase : str = batch_size _lowercase : Optional[int] = seq_length _lowercase : Tuple = is_training _lowercase : List[Any] = use_input_mask _lowercase : Optional[Any] = use_token_type_ids _lowercase : Any = use_labels _lowercase : str = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Tuple = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Tuple = max_position_embeddings _lowercase : List[str] = type_vocab_size _lowercase : Optional[Any] = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : List[str] = num_labels _lowercase : Union[str, Any] = num_choices _lowercase : List[str] = scope _lowercase : Union[str, Any] = embedding_size def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Optional[int] = None if self.use_input_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Dict = None _lowercase : Any = None _lowercase : int = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase ) _lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) _lowercase : Tuple = [input_ids, input_mask] _lowercase : str = model(_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Optional[int] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase ) _lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = self.num_labels _lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase ) _lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_choices _lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : str = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.num_labels _lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : int = config_and_inputs _lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def __a ( self ): _lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self ) _lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase ) @slow def __a ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' ) _lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowercase : List[str] = model(_lowerCAmelCase )[0] _lowercase : str = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , _lowerCAmelCase ) _lowercase : List[Any] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
677
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "facebook/nllb-large-en-ro": 1_024, "facebook/nllb-200-distilled-600M": 1_024, } # fmt: off UpperCamelCase = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class lowerCAmelCase_ ( _UpperCAmelCase ): _UpperCamelCase : Tuple = VOCAB_FILES_NAMES _UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""] _UpperCamelCase : Optional[int] = NllbTokenizer _UpperCamelCase : List[int] = [] _UpperCamelCase : List[int] = [] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , **_lowerCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it _lowercase : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token _lowercase : str = legacy_behaviour super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , legacy_behaviour=lowercase__ , **lowercase__ , ) _lowercase : str = vocab_file _lowercase : str = False if not self.vocab_file else True _lowercase : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) _lowercase : List[Any] = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _lowercase : Optional[Any] = src_lang if src_lang is not None else """eng_Latn""" _lowercase : Dict = self.convert_tokens_to_ids(self._src_lang ) _lowercase : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __a ( self ): return self._src_lang @src_lang.setter def __a ( self , _lowerCAmelCase ): _lowercase : Union[str, Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : int = [self.sep_token_id] _lowercase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _lowercase : List[Any] = src_lang _lowercase : str = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) _lowercase : str = self.convert_tokens_to_ids(lowercase__ ) _lowercase : List[str] = tgt_lang_id return inputs def __a ( self , _lowerCAmelCase , _lowerCAmelCase = "eng_Latn" , _lowerCAmelCase = None , _lowerCAmelCase = "fra_Latn" , **_lowerCAmelCase , ): _lowercase : List[Any] = src_lang _lowercase : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __a ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def __a ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __a ( self , _lowerCAmelCase ): _lowercase : int = self.convert_tokens_to_ids(lowercase__ ) if self.legacy_behaviour: _lowercase : Dict = [] _lowercase : Optional[Any] = [self.eos_token_id, self.cur_lang_code] else: _lowercase : Optional[int] = [self.cur_lang_code] _lowercase : List[str] = [self.eos_token_id] _lowercase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowercase : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) _lowercase : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __a ( self , _lowerCAmelCase ): _lowercase : str = self.convert_tokens_to_ids(lowercase__ ) if self.legacy_behaviour: _lowercase : Tuple = [] _lowercase : List[Any] = [self.eos_token_id, self.cur_lang_code] else: _lowercase : Any = [self.cur_lang_code] _lowercase : Union[str, Any] = [self.eos_token_id] _lowercase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _lowercase : Any = self.convert_ids_to_tokens(self.suffix_tokens ) _lowercase : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(lowercase__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return _lowercase : str = os.path.join( lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
704
import qiskit def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts: _lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
677
0
import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class lowerCAmelCase_ ( __SCREAMING_SNAKE_CASE ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : Any = parent _lowercase : List[str] = config_class _lowercase : Optional[int] = has_text_modality _lowercase : List[str] = kwargs _lowercase : Tuple = common_properties def __a ( self ): _lowercase : Optional[Any] = self.config_class(**self.inputs_dict ) _lowercase : List[Any] = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['vocab_size'] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__snake_case , __snake_case ) , msg=F"""`{prop}` does not exist""" ) # Test that config has the common properties as setter for idx, name in enumerate(__snake_case ): try: setattr(__snake_case , __snake_case , __snake_case ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"""`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) for idx, name in enumerate(__snake_case ): try: _lowercase : List[str] = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__snake_case , __snake_case ) , __snake_case , msg=F"""`{name} value {idx} expected, but was {getattr(__snake_case , __snake_case )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def __a ( self ): _lowercase : Tuple = self.config_class(**self.inputs_dict ) _lowercase : List[str] = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __snake_case ) def __a ( self ): _lowercase : List[Any] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Tuple = os.path.join(__snake_case , 'config.json' ) config_first.to_json_file(__snake_case ) _lowercase : int = self.config_class.from_json_file(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __a ( self ): _lowercase : Dict = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__snake_case ) _lowercase : Union[str, Any] = self.config_class.from_pretrained(__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __a ( self ): _lowercase : List[str] = self.config_class(**self.inputs_dict ) _lowercase : Dict = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : List[Any] = os.path.join(__snake_case , __snake_case ) config_first.save_pretrained(__snake_case ) _lowercase : Union[str, Any] = self.config_class.from_pretrained(__snake_case , subfolder=__snake_case ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __a ( self ): _lowercase : Optional[int] = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) _lowercase : Dict = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def __a ( self ): if self.config_class.is_composition: return _lowercase : Any = self.config_class() self.parent.assertIsNotNone(__snake_case ) def __a ( self ): _lowercase : Dict = copy.deepcopy(__snake_case ) _lowercase : Optional[Any] = self.config_class(**__snake_case ) _lowercase : Any = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) ) elif getattr(__snake_case , __snake_case ) != value: wrong_values.append((key, getattr(__snake_case , __snake_case ), value) ) if len(__snake_case ) > 0: _lowercase : Tuple = '''\n'''.join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] ) raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" ) def __a ( self ): self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
705
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict: if attention_mask is None: _lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ): _lowercase : List[str] = parent _lowercase : List[Any] = batch_size _lowercase : Optional[Any] = seq_length _lowercase : Optional[Any] = is_training _lowercase : Tuple = use_labels _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Any = hidden_act _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = eos_token_id _lowercase : int = pad_token_id _lowercase : Tuple = bos_token_id _lowercase : List[Any] = initializer_range def __a ( self ): _lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Tuple = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , ) _lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = 2_0 _lowercase : List[Any] = model_class_name(_lowerCAmelCase ) _lowercase : List[Any] = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : int = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : int = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , ) _lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = 2_0 _lowercase : Any = model_class_name(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : Optional[int] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : List[Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase ) _lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Tuple = 99 def __a ( self ): _lowercase : Dict = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) _lowercase : Union[str, Any] = input_ids.shape[0] _lowercase : Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __a ( self ): _lowercase , _lowercase , _lowercase : int = self._get_config_and_data() _lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase ) _lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) _lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) _lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) _lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) _lowercase : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) _lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ): _UpperCamelCase : int = True _UpperCamelCase : Any = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __a ( self ): _lowercase : List[str] = FlaxBlenderbotSmallModelTester(self ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = model_class(_lowerCAmelCase ) @jax.jit def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ): return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) with self.subTest('JIT Enabled' ): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __a ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : int = model_class(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _lowercase : List[Any] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return model.decode( decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , ) with self.subTest('JIT Enabled' ): _lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __a ( self ): for model_class_name in self.all_model_classes: _lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id _lowercase : int = model(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase )
677
0
'''simple docstring''' from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Optional[Any] = "philschmid/bart-large-cnn-samsum" _UpperCamelCase : Any = ( "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, " "and returns a summary of the text." ) _UpperCamelCase : Optional[int] = "summarizer" _UpperCamelCase : Optional[Any] = AutoTokenizer _UpperCamelCase : Dict = AutoModelForSeqaSeqLM _UpperCamelCase : Any = ["text"] _UpperCamelCase : str = ["text"] def __a ( self , _lowerCAmelCase ): return self.pre_processor(_lowerCAmelCase , return_tensors='pt' , truncation=_lowerCAmelCase ) def __a ( self , _lowerCAmelCase ): return self.model.generate(**_lowerCAmelCase )[0] def __a ( self , _lowerCAmelCase ): return self.pre_processor.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
706
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Dict = "longformer" def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : Optional[int] = attention_window _lowercase : str = sep_token_id _lowercase : Optional[Any] = bos_token_id _lowercase : List[Any] = eos_token_id _lowercase : Optional[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : List[str] = hidden_act _lowercase : List[str] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : List[str] = onnx_export class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = True @property def __a ( self ): if self.task == "multiple-choice": _lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : int = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def __a ( self ): _lowercase : Optional[int] = super().outputs if self.task == "default": _lowercase : List[str] = {0: 'batch'} return outputs @property def __a ( self ): return 1E-4 @property def __a ( self ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 1_4 ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : int = super().generate_dummy_inputs( preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _lowercase : str = torch.zeros_like(inputs['input_ids'] ) # make every second token global _lowercase : Any = 1 return inputs
677
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __a ( self ): torch.manual_seed(0 ) _lowercase : Dict = UNetaDModel( sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return model @property def __a ( self ): torch.manual_seed(0 ) _lowercase : int = UNetaDConditionModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , cross_attention_dim=1_0 , ) return model @property def __a ( self ): torch.manual_seed(0 ) _lowercase : Any = AutoencoderKL( sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , ) _lowercase : List[Any] = UNetaDModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('AttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'AttnUpBlock2D') , ) return vqvae, unet @slow def __a ( self ): _lowercase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator _lowercase : Union[str, Any] = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowercase : Union[str, Any] = DDPMScheduler() _lowercase : int = AudioDiffusionPipeline(vqvae=__a , unet=self.dummy_unet , mel=__a , scheduler=__a ) _lowercase : Optional[int] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _lowercase : List[str] = torch.Generator(device=__a ).manual_seed(4_2 ) _lowercase : List[str] = pipe(generator=__a , steps=4 ) _lowercase : int = output.audios[0] _lowercase : Tuple = output.images[0] _lowercase : str = torch.Generator(device=__a ).manual_seed(4_2 ) _lowercase : str = pipe(generator=__a , steps=4 , return_dict=__a ) _lowercase : Optional[int] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowercase : Tuple = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0] _lowercase : Union[str, Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='uint8' )[:1_0] _lowercase : List[Any] = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _lowercase : Optional[Any] = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowercase : Tuple = DDIMScheduler() _lowercase : Any = self.dummy_vqvae_and_unet _lowercase : str = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__a , scheduler=__a ) _lowercase : Union[str, Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) np.random.seed(0 ) _lowercase : Optional[Any] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowercase : Dict = torch.Generator(device=__a ).manual_seed(4_2 ) _lowercase : Dict = pipe(raw_audio=__a , generator=__a , start_step=5 , steps=1_0 ) _lowercase : Optional[int] = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowercase : int = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0] _lowercase : int = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowercase : int = self.dummy_unet_condition _lowercase : Optional[Any] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__a , mel=__a , scheduler=__a ) _lowercase : str = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) np.random.seed(0 ) _lowercase : Optional[int] = torch.rand((1, 1, 1_0) ) _lowercase : Any = pipe(generator=__a , encoding=__a ) _lowercase : int = output.images[0] _lowercase : str = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0] _lowercase : Dict = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ): _lowercase : Any = torch_device _lowercase : Optional[Any] = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) _lowercase : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) _lowercase : Any = torch.Generator(device=__a ).manual_seed(4_2 ) _lowercase : List[str] = pipe(generator=__a ) _lowercase : Any = output.audios[0] _lowercase : Optional[int] = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowercase : List[Any] = np.frombuffer(image.tobytes() , dtype='uint8' )[:1_0] _lowercase : int = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
707
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
677
0
from __future__ import annotations import numpy as np def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> tuple[np.ndarray, np.ndarray]: _lowercase , _lowercase : Tuple = np.shape(__UpperCamelCase ) if rows != columns: _lowercase : Dict = ( '\'table\' has to be of square shaped array but got a ' F"""{rows}x{columns} array:\n{table}""" ) raise ValueError(__UpperCamelCase ) _lowercase : int = np.zeros((rows, columns) ) _lowercase : List[Any] = np.zeros((rows, columns) ) for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): _lowercase : int = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) _lowercase : Dict = (table[i][j] - total) / upper[j][j] _lowercase : Tuple = 1 for j in range(__UpperCamelCase , __UpperCamelCase ): _lowercase : str = sum(lower[i][k] * upper[k][j] for k in range(__UpperCamelCase ) ) _lowercase : Tuple = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
708
import math def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list: _lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : Dict = i _lowercase : str = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _lowercase : Optional[Any] = array[temp_index - 1] temp_index -= 1 _lowercase : Optional[Any] = temp_index_value return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap _lowercase : List[str] = index _lowercase : List[str] = 2 * index + 1 # Left Node _lowercase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _lowercase : Any = left_index if right_index < heap_size and array[largest] < array[right_index]: _lowercase : str = right_index if largest != index: _lowercase , _lowercase : List[str] = array[largest], array[index] heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: _lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _lowercase , _lowercase : List[Any] = array[0], array[i] heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE ) return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[Any] = low _lowercase : Tuple = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _lowercase , _lowercase : Tuple = array[j], array[i] i += 1 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: if len(SCREAMING_SNAKE_CASE ) == 0: return array _lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) ) _lowercase : str = 16 return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(SCREAMING_SNAKE_CASE ) max_depth -= 1 _lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = p return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input("Enter numbers separated by a comma : ").strip() UpperCamelCase = [float(item) for item in user_input.split(",")] print(sort(unsorted))
677
0
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase_ ( _lowerCAmelCase ): def __a ( self ): _lowercase : Any = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCAmelCase , 'embed_dim' ) ) self.parent.assertTrue(hasattr(_lowerCAmelCase , 'num_heads' ) ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=6_4 , _lowerCAmelCase=3 , _lowerCAmelCase=[1_6, 4_8, 9_6] , _lowerCAmelCase=[1, 3, 6] , _lowerCAmelCase=[1, 2, 1_0] , _lowerCAmelCase=[7, 3, 3] , _lowerCAmelCase=[4, 2, 2] , _lowerCAmelCase=[2, 1, 1] , _lowerCAmelCase=[2, 2, 2] , _lowerCAmelCase=[False, False, True] , _lowerCAmelCase=[0.0, 0.0, 0.0] , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=2 , ): _lowercase : str = parent _lowercase : Optional[Any] = batch_size _lowercase : Any = image_size _lowercase : Any = patch_sizes _lowercase : Tuple = patch_stride _lowercase : Optional[int] = patch_padding _lowercase : Optional[int] = is_training _lowercase : str = use_labels _lowercase : List[str] = num_labels _lowercase : List[str] = num_channels _lowercase : Dict = embed_dim _lowercase : Any = num_heads _lowercase : Union[str, Any] = stride_kv _lowercase : List[str] = depth _lowercase : List[str] = cls_token _lowercase : Tuple = attention_drop_rate _lowercase : Dict = initializer_range _lowercase : Tuple = layer_norm_eps def __a ( self ): _lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowercase : List[Any] = None if self.use_labels: # create a random int32 tensor of given shape _lowercase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) _lowercase : Dict = self.get_config() return config, pixel_values, labels def __a ( self ): return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = TFCvtModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , training=_lowerCAmelCase ) _lowercase : Tuple = (self.image_size, self.image_size) _lowercase , _lowercase : List[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): _lowercase : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _lowercase : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = self.num_labels _lowercase : Any = TFCvtForImageClassification(_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self ): _lowercase : Tuple = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : Tuple = config_and_inputs _lowercase : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () _UpperCamelCase : Optional[Any] = ( {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification} if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : int = False _UpperCamelCase : List[Any] = False _UpperCamelCase : str = False _UpperCamelCase : str = False def __a ( self ): _lowercase : List[str] = TFCvtModelTester(self ) _lowercase : Optional[int] = TFCvtConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='Cvt does not output attentions' ) def __a ( self ): pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def __a ( self ): pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def __a ( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) def __a ( self ): super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __a ( self ): super().test_keras_fit() @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' ) def __a ( self ): _lowercase : Optional[Any] = tf.keras.mixed_precision.Policy('mixed_float16' ) tf.keras.mixed_precision.set_global_policy(_lowerCAmelCase ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('float32' ) def __a ( self ): _lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : List[Any] = model_class(_lowerCAmelCase ) _lowercase : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Tuple = [*signature.parameters.keys()] _lowercase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def __a ( self ): def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = model_class(_lowerCAmelCase ) _lowercase : Dict = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) _lowercase : List[Any] = outputs.hidden_states _lowercase : str = len(self.model_tester.depth ) self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Tuple = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : int = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFCvtModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def __magic_name__ ( ) -> Tuple: _lowercase : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def __a ( self ): return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __a ( self ): _lowercase : List[str] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) _lowercase : Dict = self.default_image_processor _lowercase : Dict = prepare_img() _lowercase : List[str] = image_processor(images=_lowerCAmelCase , return_tensors='tf' ) # forward pass _lowercase : int = model(**_lowerCAmelCase ) # verify the logits _lowercase : Tuple = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) _lowercase : Optional[int] = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _lowerCAmelCase , atol=1E-4 ) )
709
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPFeatureExtractor"] UpperCamelCase = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: return str(snake_case_ ) == str(snake_case_ )[::-1] def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return int(snake_case_ ) + int(str(snake_case_ )[::-1] ) def __magic_name__ ( SCREAMING_SNAKE_CASE = 10_000 ) -> int: _lowercase : Dict = [] for num in range(1 , snake_case_ ): _lowercase : Dict = 0 _lowercase : str = num while iterations < 50: _lowercase : Any = sum_reverse(snake_case_ ) iterations += 1 if is_palindrome(snake_case_ ): break else: lychrel_nums.append(snake_case_ ) return len(snake_case_ ) if __name__ == "__main__": print(f'''{solution() = }''')
710
from collections.abc import Sequence def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: _lowercase : Optional[Any] = 0.0 for coeff in reversed(SCREAMING_SNAKE_CASE ): _lowercase : Optional[int] = result * x + coeff return result if __name__ == "__main__": UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCamelCase = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
677
0
import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ): @require_torch def __a ( self ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _lowercase : Optional[Any] = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _lowercase : int = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _lowercase : Dict = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _lowercase : Any = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(UpperCamelCase__ ) BertModel.from_pretrained(UpperCamelCase__ ) BertTokenizer.from_pretrained(UpperCamelCase__ ) pipeline(task='fill-mask' , model=UpperCamelCase__ ) # baseline - just load from_pretrained with normal network _lowercase : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _lowercase : List[Any] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _lowercase : Any = '''1''' _lowercase : List[str] = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def __a ( self ): # python one-liner segments # this must be loaded before socket.socket is monkey-patched _lowercase : str = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' _lowercase : List[str] = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' _lowercase : str = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache _lowercase : Any = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(UpperCamelCase__ ) BertModel.from_pretrained(UpperCamelCase__ ) BertTokenizer.from_pretrained(UpperCamelCase__ ) pipeline(task='fill-mask' , model=UpperCamelCase__ ) # baseline - just load from_pretrained with normal network _lowercase : int = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed _lowercase : Optional[int] = self.get_env() _lowercase : int = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def __a ( self ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _lowercase : List[str] = ''' from transformers import BertConfig, BertModel, BertTokenizer ''' _lowercase : Union[str, Any] = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' _lowercase : Dict = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network _lowercase : int = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _lowercase : Dict = self.get_env() _lowercase : Optional[int] = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # next emulate no network _lowercase : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _lowercase : List[Any] = '''1''' _lowercase : Dict = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) @require_torch def __a ( self ): _lowercase : int = ''' from transformers import pipeline ''' _lowercase : str = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' _lowercase : Optional[int] = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' _lowercase : int = self.get_env() _lowercase : Optional[int] = '''1''' _lowercase : int = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] _lowercase : Dict = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , ) @require_torch def __a ( self ): _lowercase : str = ''' from transformers import AutoModel ''' _lowercase : List[Any] = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network _lowercase : int = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed _lowercase : Tuple = self.get_env() _lowercase : str = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _lowercase : Any = '''1''' _lowercase : Any = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('success' , result.stdout.decode() )
711
from __future__ import annotations class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase=None ): _lowercase : int = data _lowercase : Union[str, Any] = None def __repr__( self ): _lowercase : Dict = [] _lowercase : Tuple = self while temp: string_rep.append(F"""{temp.data}""" ) _lowercase : Optional[Any] = temp.next return "->".join(_lowerCAmelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: if not elements_list: raise Exception('The Elements List is empty' ) _lowercase : Union[str, Any] = Node(elements_list[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): _lowercase : Optional[int] = Node(elements_list[i] ) _lowercase : List[Any] = current.next return head def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None: if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __magic_name__ ( ) -> List[str]: from doctest import testmod testmod() _lowercase : int = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) UpperCamelCase : Any = [ "cross_validation.py", "gradient_accumulation.py", "local_sgd.py", "multi_process_metrics.py", "memory.py", "automatic_gradient_accumulation.py", "fsdp_with_peak_mem_tracking.py", "deepspeed_with_config_support.py", "megatron_lm_gpt_pretraining.py", ] class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None ): _lowercase : Union[str, Any] = None _lowercase : Union[str, Any] = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) _lowercase : Union[str, Any] = os.path.abspath('examples' ) for item in os.listdir(_lowerCamelCase ): if item not in EXCLUDE_EXAMPLES: _lowercase : int = os.path.join(_lowerCamelCase , _lowerCamelCase ) if os.path.isfile(_lowerCamelCase ) and ".py" in item_path: with self.subTest( tested_script=_lowerCamelCase , feature_script=_lowerCamelCase , tested_section='main()' if parser_only else 'training_function()' , ): _lowercase : List[Any] = compare_against_test( os.path.join(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) _lowercase : List[str] = '\n'.join(_lowerCamelCase ) if special_strings is not None: for string in special_strings: _lowercase : List[Any] = diff.replace(_lowerCamelCase , '' ) self.assertEqual(_lowerCamelCase , '' ) def __a ( self ): self.one_complete_example('complete_nlp_example.py' , _lowerCamelCase ) self.one_complete_example('complete_nlp_example.py' , _lowerCamelCase ) def __a ( self ): _lowercase : List[Any] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) _lowercase : str = [ ' ' * 1_6 + '{\n\n', ' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 2_0 + '"f1": eval_metric["f1"],\n\n', ' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 2_0 + '"epoch": epoch,\n\n', ' ' * 1_6 + '},\n\n', ' ' * 1_6 + 'step=epoch,\n', ' ' * 1_2, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) self.one_complete_example('complete_cv_example.py' , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class lowerCAmelCase_ ( lowerCAmelCase__ ): _UpperCamelCase : str = False @classmethod def __a ( cls ): super().setUpClass() _lowercase : int = tempfile.mkdtemp() _lowercase : List[Any] = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) _lowercase : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def __a ( cls ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def __a ( self ): _lowercase : List[str] = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def __a ( self ): _lowercase : List[Any] = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n """.split() _lowercase : str = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def __a ( self ): _lowercase : Tuple = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n """.split() _lowercase : Tuple = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) self.assertNotIn('epoch 0:' , _lowerCamelCase ) self.assertIn('epoch 1:' , _lowerCamelCase ) def __a ( self ): _lowercase : List[str] = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n """.split() _lowercase : Any = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) if torch.cuda.is_available(): _lowercase : Optional[int] = torch.cuda.device_count() else: _lowercase : List[Any] = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , _lowerCamelCase ) self.assertIn('epoch 1:' , _lowerCamelCase ) else: self.assertIn('epoch 0:' , _lowerCamelCase ) self.assertIn('epoch 1:' , _lowerCamelCase ) @slow def __a ( self ): _lowercase : Tuple = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): _lowercase : Dict = run_command(self._launch_args + testargs , return_stdout=_lowerCamelCase ) _lowercase : Optional[int] = re.findall('({.+})' , _lowerCamelCase ) _lowercase : Optional[Any] = [r for r in results if 'accuracy' in r][-1] _lowercase : List[str] = ast.literal_eval(_lowerCamelCase ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def __a ( self ): _lowercase : int = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def __a ( self ): with tempfile.TemporaryDirectory() as tmpdir: _lowercase : Optional[Any] = F"""\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_lowerCamelCase , 'tracking' ) ) ) def __a ( self ): _lowercase : str = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def __a ( self ): _lowercase : str = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
712
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2) if __name__ == "__main__": def __magic_name__ ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
677
0
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 UpperCamelCase = data_utils.TransfoXLTokenizer UpperCamelCase = data_utils.TransfoXLCorpus UpperCamelCase = data_utils UpperCamelCase = data_utils def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__snake_case , 'rb' ) as fp: _lowercase : Optional[int] = pickle.load(__snake_case , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) _lowercase : Tuple = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) _lowercase : Union[str, Any] = corpus.vocab.__dict__ torch.save(__snake_case , __snake_case ) _lowercase : str = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , __snake_case ) _lowercase : str = pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__snake_case , __snake_case ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model _lowercase : Tuple = os.path.abspath(__snake_case ) _lowercase : int = os.path.abspath(__snake_case ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": _lowercase : Dict = TransfoXLConfig() else: _lowercase : List[Any] = TransfoXLConfig.from_json_file(__snake_case ) print(F"""Building PyTorch model from configuration: {config}""" ) _lowercase : str = TransfoXLLMHeadModel(__snake_case ) _lowercase : Any = load_tf_weights_in_transfo_xl(__snake_case , __snake_case , __snake_case ) # Save pytorch-model _lowercase : Dict = os.path.join(__snake_case , __snake_case ) _lowercase : int = os.path.join(__snake_case , __snake_case ) print(F"""Save PyTorch model to {os.path.abspath(__snake_case )}""" ) torch.save(model.state_dict() , __snake_case ) print(F"""Save configuration file to {os.path.abspath(__snake_case )}""" ) with open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) UpperCamelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
713
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase = 16 UpperCamelCase = 32 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 ) -> Any: _lowercase : str = AutoTokenizer.from_pretrained('bert-base-cased' ) _lowercase : Union[str, Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _lowercase : str = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=a__ , max_length=a__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowercase : str = datasets.map( a__ , batched=a__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowercase : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowercase : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowercase : List[str] = 16 elif accelerator.mixed_precision != "no": _lowercase : Dict = 8 else: _lowercase : Tuple = None return tokenizer.pad( a__ , padding='longest' , max_length=a__ , pad_to_multiple_of=a__ , return_tensors='pt' , ) # Instantiate dataloaders. _lowercase : Optional[Any] = DataLoader( tokenized_datasets['train'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) _lowercase : List[str] = DataLoader( tokenized_datasets['validation'] , shuffle=a__ , collate_fn=a__ , batch_size=a__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase = mocked_dataloaders # noqa: F811 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: if os.environ.get('TESTING_MOCKED_DATALOADERS' , a__ ) == "1": _lowercase : Any = 2 # Initialize accelerator _lowercase : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowercase : Optional[int] = config['lr'] _lowercase : int = int(config['num_epochs'] ) _lowercase : Optional[int] = int(config['seed'] ) _lowercase : int = int(config['batch_size'] ) _lowercase : Optional[Any] = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation _lowercase : Optional[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowercase : Tuple = batch_size // MAX_GPU_BATCH_SIZE _lowercase : Tuple = MAX_GPU_BATCH_SIZE set_seed(a__ ) _lowercase , _lowercase : Any = get_dataloaders(a__ , a__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowercase : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=a__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowercase : Any = model.to(accelerator.device ) # Instantiate optimizer _lowercase : Union[str, Any] = AdamW(params=model.parameters() , lr=a__ ) # Instantiate scheduler _lowercase : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=a__ , num_warmup_steps=100 , num_training_steps=(len(a__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = accelerator.prepare( a__ , a__ , a__ , a__ , a__ ) # Now we train the model for epoch in range(a__ ): model.train() for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _lowercase : Tuple = model(**a__ ) _lowercase : List[Any] = outputs.loss _lowercase : List[str] = loss / gradient_accumulation_steps accelerator.backward(a__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() _lowercase : List[str] = 0 for step, batch in enumerate(a__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowercase : Tuple = model(**a__ ) _lowercase : List[str] = outputs.logits.argmax(dim=-1 ) _lowercase , _lowercase : Dict = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(a__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples _lowercase : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowercase : Dict = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=a__ , references=a__ , ) _lowercase : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , a__ ) def __magic_name__ ( ) -> str: _lowercase : List[str] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=a__ , default=a__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _lowercase : Optional[Any] = parser.parse_args() _lowercase : Any = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(a__ , a__ ) if __name__ == "__main__": main()
714
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Any = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Tuple = normalizer_class(**_lowerCAmelCase ) _lowercase : Union[str, Any] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = [self.sep_token_id] _lowercase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
677
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowerCAmelCase_ ( UpperCamelCase__ ): _UpperCamelCase : List[Any] = "Salesforce/blip-image-captioning-base" _UpperCamelCase : Optional[int] = ( "This is a tool that generates a description of an image. It takes an input named `image` which should be the " "image to caption, and returns a text that contains the description in English." ) _UpperCamelCase : Any = "image_captioner" _UpperCamelCase : Optional[Any] = AutoModelForVisionaSeq _UpperCamelCase : Any = ["image"] _UpperCamelCase : List[Any] = ["text"] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): requires_backends(self , ['vision'] ) super().__init__(*__A , **__A ) def __a ( self , _lowerCAmelCase ): return self.pre_processor(images=__A , return_tensors='pt' ) def __a ( self , _lowerCAmelCase ): return self.model.generate(**__A ) def __a ( self , _lowerCAmelCase ): return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0].strip()
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
716
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: for attribute in key.split('.' ): _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: _lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: _lowercase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : List[str] = value elif weight_type == "weight_g": _lowercase : Any = value elif weight_type == "weight_v": _lowercase : Tuple = value elif weight_type == "bias": _lowercase : List[str] = value else: _lowercase : Dict = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Dict = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : int = True else: for key, mapped_key in MAPPING.items(): _lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _lowercase : Union[str, Any] = True if "*" in mapped_key: _lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] _lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: _lowercase : Optional[int] = 'weight_g' elif "weight_v" in name: _lowercase : Optional[Any] = 'weight_v' elif "weight" in name: _lowercase : str = 'weight' elif "bias" in name: _lowercase : Any = 'bias' else: _lowercase : str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = full_name.split('conv_layers.' )[-1] _lowercase : Any = name.split('.' ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: _lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertConfig() if is_finetuned: if dict_path: _lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Dict = target_dict.pad_index _lowercase : Dict = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : List[Any] = len(target_dict.symbols ) _lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) _lowercase : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) _lowercase : str = True if config.feat_extract_norm == 'layer' else False _lowercase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) _lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : int = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list[int]: _lowercase : List[str] = int(SCREAMING_SNAKE_CASE ) # Initialize Result _lowercase : Optional[Any] = [] # Traverse through all denomination for denomination in reversed(SCREAMING_SNAKE_CASE ): # Find denominations while int(SCREAMING_SNAKE_CASE ) >= int(SCREAMING_SNAKE_CASE ): total_value -= int(SCREAMING_SNAKE_CASE ) answer.append(SCREAMING_SNAKE_CASE ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCamelCase = [] UpperCamelCase = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCamelCase = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(f'''Denomination {i}: ''').strip())) UpperCamelCase = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCamelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] UpperCamelCase = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(f'''Following is minimal change for {value}: ''') UpperCamelCase = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
717
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ): _lowercase : List[str] = parent _lowercase : Optional[Any] = batch_size _lowercase : str = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_input_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Optional[Any] = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Dict = scope _lowercase : List[Any] = range_bbox def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : List[str] = bbox[i, j, 3] _lowercase : Optional[int] = bbox[i, j, 1] _lowercase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : Dict = bbox[i, j, 2] _lowercase : Dict = bbox[i, j, 0] _lowercase : int = t _lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) _lowercase : Any = None if self.use_input_mask: _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Union[str, Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Any = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase ) _lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = self.num_labels _lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_labels _lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[Any] = config_and_inputs _lowercase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Union[str, Any] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = 10 def __a ( self ): _lowercase : Optional[int] = TFLayoutLMModelTester(self ) _lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __a ( self ): pass def __magic_name__ ( ) -> Optional[int]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the sequence output on [0, :3, :3] _lowercase : Optional[Any] = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head _lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Any = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowercase : List[Any] = outputs.loss _lowercase : Any = (2,) self.assertEqual(loss.shape , _lowerCAmelCase ) # test the shape of the logits _lowercase : str = outputs.logits _lowercase : Dict = (2, 2) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Dict = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) # test the shape of the logits _lowercase : Dict = outputs.logits _lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the shape of the logits _lowercase : Any = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
677
0
import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCamelCase = { "allenai/led-base-16384": 16_384, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : List[str] = VOCAB_FILES_NAMES _UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[Any] = LEDTokenizer _UpperCamelCase : List[str] = ["input_ids", "attention_mask"] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="replace" , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=False , _lowerCAmelCase=True , **_lowerCAmelCase , ): super().__init__( __A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , ) _lowercase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __A ) != add_prefix_space: _lowercase : Optional[Any] = getattr(__A , pre_tok_state.pop('type' ) ) _lowercase : List[str] = add_prefix_space _lowercase : Tuple = pre_tok_class(**__A ) _lowercase : List[str] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowercase : Union[str, Any] = 'post_processor' _lowercase : Dict = getattr(self.backend_tokenizer , __A , __A ) if tokenizer_component_instance: _lowercase : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowercase : Dict = tuple(state['sep'] ) if "cls" in state: _lowercase : Optional[int] = tuple(state['cls'] ) _lowercase : Dict = False if state.get('add_prefix_space' , __A ) != add_prefix_space: _lowercase : Tuple = add_prefix_space _lowercase : Union[str, Any] = True if state.get('trim_offsets' , __A ) != trim_offsets: _lowercase : Any = trim_offsets _lowercase : Optional[Any] = True if changes_to_apply: _lowercase : Optional[Any] = getattr(__A , state.pop('type' ) ) _lowercase : Optional[int] = component_class(**__A ) setattr(self.backend_tokenizer , __A , __A ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def __a ( self ): if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __a ( self , _lowerCAmelCase ): _lowercase : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value _lowercase : Optional[int] = value def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): _lowercase : Tuple = kwargs.get('is_split_into_words' , __A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*__A , **__A ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): _lowercase : List[str] = kwargs.get('is_split_into_words' , __A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*__A , **__A ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = self._tokenizer.model.save(__A , name=__A ) return tuple(__A ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Dict = [self.sep_token_id] _lowercase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = PaddingStrategy.DO_NOT_PAD , _lowerCAmelCase = None , _lowerCAmelCase = None , ): _lowercase : Tuple = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: _lowercase : int = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _lowercase : List[Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. _lowercase : Any = len(encoded_inputs['global_attention_mask'] ) != len(__A ) if needs_to_be_padded: _lowercase : Optional[int] = len(__A ) - len(encoded_inputs['global_attention_mask'] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _lowercase : List[Any] = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": _lowercase : List[str] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return encoded_inputs
718
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : List[str] = logging.get_logger() # the current default level is logging.WARNING _lowercase : Union[str, Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = logging.get_verbosity() _lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : Tuple = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase ) _lowercase : Optional[Any] = logging.log_levels[env_level_str] _lowercase : Dict = logging.get_verbosity() self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level _lowercase : Any = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(_lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def __a ( self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : List[str] = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def __magic_name__ ( ) -> List[str]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
677
0
import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE="29500" ) -> Union[str, Any]: _lowercase : Tuple = False _lowercase : Union[str, Any] = False if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ): _lowercase : List[str] = True elif "IPython" in sys.modules: _lowercase : str = """google.colab""" in str(sys.modules['IPython'].get_ipython() ) try: _lowercase : int = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" ) if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , _lowerCamelCase ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside ' 'your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if num_processes is None: _lowercase : int = 8 _lowercase : Any = PrepareForLaunch(_lowerCamelCase , distributed_type='TPU' ) print(F"""Launching a training on {num_processes} TPU cores.""" ) xmp.spawn(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on one CPU.' ) function(*_lowerCamelCase ) else: if num_processes is None: raise ValueError( 'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( 'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized ' 'inside your training function. Restart your notebook and make sure no cells initializes an ' '`Accelerator`.' ) if torch.cuda.is_initialized(): raise ValueError( 'To launch a multi-GPU training from your notebook, you need to avoid running any instruction ' 'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA ' 'function.' ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCamelCase , master_addr='127.0.01' , master_port=_lowerCamelCase , mixed_precision=_lowerCamelCase ): _lowercase : str = PrepareForLaunch(_lowerCamelCase , distributed_type='MULTI_GPU' ) print(F"""Launching training on {num_processes} GPUs.""" ) try: start_processes(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( 'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. ' 'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. ' 'Please review your imports and test them when running the `notebook_launcher()` to identify ' 'which one is problematic.' ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): _lowercase : List[str] = """1""" print('Launching training on MPS.' ) elif torch.cuda.is_available(): print('Launching training on one GPU.' ) else: print('Launching training on CPU.' ) function(*_lowerCamelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=() , SCREAMING_SNAKE_CASE=2 ) -> List[str]: from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=_lowerCamelCase , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ): _lowercase : Union[str, Any] = PrepareForLaunch(_lowerCamelCase , debug=_lowerCamelCase ) start_processes(_lowerCamelCase , args=_lowerCamelCase , nprocs=_lowerCamelCase , start_method='fork' )
719
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase = "pt" elif is_tf_available(): UpperCamelCase = "tf" else: UpperCamelCase = "jax" class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Dict = PerceiverTokenizer _UpperCamelCase : str = False def __a ( self ): super().setUp() _lowercase : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __a ( self , **_lowerCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _lowercase : Union[str, Any] = [] for i in range(len(_lowerCAmelCase ) ): try: _lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) ) _lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowercase : Any = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowercase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowercase : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: _lowercase : List[Any] = ' ' + output_txt _lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __a ( self ): _lowercase : Dict = self.perceiver_tokenizer _lowercase : Optional[Any] = 'Unicode €.' _lowercase : str = tokenizer(_lowerCAmelCase ) _lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' ) _lowercase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : int = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __a ( self ): _lowercase : List[str] = self.perceiver_tokenizer _lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) if FRAMEWORK != "jax": _lowercase : int = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def __a ( self ): _lowercase : List[Any] = self.perceiver_tokenizer _lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _lowerCAmelCase ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertNotIn('decoder_input_ids' , _lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.perceiver_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : Optional[int] = tokenizer( text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def __a ( self ): # safety check on max_len default value so we are sure the test works _lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowercase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) _lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[str] = tempfile.mkdtemp() _lowercase : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : List[str] = json.load(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(_lowerCAmelCase ) _lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] _lowercase : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[int] = tokenizer_class.from_pretrained( _lowerCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )] _lowercase : Tuple = tokenizer_class.from_pretrained( _lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __a ( self ): _lowercase : str = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
677
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : torch.FloatTensor class lowerCAmelCase_ ( __snake_case , __snake_case ): @register_to_config def __init__( self , _lowerCAmelCase = 6_5_5_3_6 , _lowerCAmelCase = None , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0 , _lowerCAmelCase = "fourier" , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = 0.0 , _lowerCAmelCase = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _lowerCAmelCase = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _lowerCAmelCase = "UNetMidBlock1D" , _lowerCAmelCase = None , _lowerCAmelCase = (3_2, 3_2, 6_4) , _lowerCAmelCase = None , _lowerCAmelCase = 8 , _lowerCAmelCase = 1 , _lowerCAmelCase = False , ): super().__init__() _lowercase : str = sample_size # time if time_embedding_type == "fourier": _lowercase : Optional[Any] = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=A__ , log=A__ , flip_sin_to_cos=A__ ) _lowercase : Optional[int] = 2 * block_out_channels[0] elif time_embedding_type == "positional": _lowercase : Optional[Any] = Timesteps( block_out_channels[0] , flip_sin_to_cos=A__ , downscale_freq_shift=A__ ) _lowercase : int = block_out_channels[0] if use_timestep_embedding: _lowercase : Any = block_out_channels[0] * 4 _lowercase : List[Any] = TimestepEmbedding( in_channels=A__ , time_embed_dim=A__ , act_fn=A__ , out_dim=block_out_channels[0] , ) _lowercase : int = nn.ModuleList([] ) _lowercase : Tuple = None _lowercase : List[str] = nn.ModuleList([] ) _lowercase : int = None # down _lowercase : List[str] = in_channels for i, down_block_type in enumerate(A__ ): _lowercase : Optional[Any] = output_channel _lowercase : Optional[Any] = block_out_channels[i] if i == 0: input_channel += extra_in_channels _lowercase : str = i == len(A__ ) - 1 _lowercase : Any = get_down_block( A__ , num_layers=A__ , in_channels=A__ , out_channels=A__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(A__ ) # mid _lowercase : List[str] = get_mid_block( A__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A__ , add_downsample=A__ , ) # up _lowercase : Union[str, Any] = list(reversed(A__ ) ) _lowercase : int = reversed_block_out_channels[0] if out_block_type is None: _lowercase : str = out_channels else: _lowercase : Any = block_out_channels[0] for i, up_block_type in enumerate(A__ ): _lowercase : List[str] = output_channel _lowercase : str = ( reversed_block_out_channels[i + 1] if i < len(A__ ) - 1 else final_upsample_channels ) _lowercase : int = i == len(A__ ) - 1 _lowercase : str = get_up_block( A__ , num_layers=A__ , in_channels=A__ , out_channels=A__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(A__ ) _lowercase : Dict = output_channel # out _lowercase : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 ) _lowercase : List[Any] = get_out_block( out_block_type=A__ , num_groups_out=A__ , embed_dim=block_out_channels[0] , out_channels=A__ , act_fn=A__ , fc_dim=block_out_channels[-1] // 4 , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True , ): _lowercase : Any = timestep if not torch.is_tensor(A__ ): _lowercase : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(A__ ) and len(timesteps.shape ) == 0: _lowercase : Optional[Any] = timesteps[None].to(sample.device ) _lowercase : List[str] = self.time_proj(A__ ) if self.config.use_timestep_embedding: _lowercase : Tuple = self.time_mlp(A__ ) else: _lowercase : Optional[int] = timestep_embed[..., None] _lowercase : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) _lowercase : Union[str, Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down _lowercase : List[str] = () for downsample_block in self.down_blocks: _lowercase : str = downsample_block(hidden_states=A__ , temb=A__ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: _lowercase : Any = self.mid_block(A__ , A__ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): _lowercase : int = down_block_res_samples[-1:] _lowercase : str = down_block_res_samples[:-1] _lowercase : List[Any] = upsample_block(A__ , res_hidden_states_tuple=A__ , temb=A__ ) # 5. post-process if self.out_block: _lowercase : str = self.out_block(A__ , A__ ) if not return_dict: return (sample,) return UNetaDOutput(sample=A__ )
720
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ConditionalDetrFeatureExtractor"] UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def __magic_name__ ( SCREAMING_SNAKE_CASE = "isbn/0140328726" ) -> dict: _lowercase : int = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes if new_olid.count('/' ) != 1: _lowercase : List[Any] = F"""{olid} is not a valid Open Library olid""" raise ValueError(lowerCAmelCase__ ) return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json() def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> dict: _lowercase : Optional[int] = { 'title': 'Title', 'publish_date': 'Publish date', 'authors': 'Authors', 'number_of_pages': 'Number of pages:', 'first_sentence': 'First sentence', 'isbn_10': 'ISBN (10)', 'isbn_13': 'ISBN (13)', } _lowercase : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} _lowercase : Union[str, Any] = [ get_openlibrary_data(author['key'] )['name'] for author in data['Authors'] ] _lowercase : Dict = data['First sentence']['value'] for key, value in data.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): _lowercase : str = ', '.join(lowerCAmelCase__ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: UpperCamelCase = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: UpperCamelCase = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("\n".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
721
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Tuple = "ClapFeatureExtractor" _UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowercase : Any = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowercase : Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __a ( self ): _lowercase : Dict = self.tokenizer.model_input_names _lowercase : Any = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
677
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["""GLPNFeatureExtractor"""] UpperCamelCase = ["""GLPNImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""", """GLPNForDepthEstimation""", """GLPNLayer""", """GLPNModel""", """GLPNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_glpn import GLPNFeatureExtractor from .image_processing_glpn import GLPNImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_glpn import ( GLPN_PRETRAINED_MODEL_ARCHIVE_LIST, GLPNForDepthEstimation, GLPNLayer, GLPNModel, GLPNPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = num_of_nodes _lowercase : list[list[int]] = [] _lowercase : dict[int, int] = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.m_edges.append([u_node, v_node, weight] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase : Optional[int] = self.find_component(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if component_size[u_node] <= component_size[v_node]: _lowercase : str = v_node component_size[v_node] += component_size[u_node] self.set_component(_lowerCAmelCase ) elif component_size[u_node] >= component_size[v_node]: _lowercase : Any = self.find_component(_lowerCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_lowerCAmelCase ) def __a ( self ): _lowercase : Any = [] _lowercase : Optional[Any] = 0 _lowercase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _lowercase : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase : List[str] = edge _lowercase : Union[str, Any] = self.m_component[u] _lowercase : Union[str, Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : int = edge _lowercase : Optional[int] = self.m_component[u] _lowercase : Optional[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _lowercase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __magic_name__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
677
0
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters UpperCamelCase = logging.get_logger(__name__) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: # Recurse if needed if "." in tensor_name: _lowercase : Dict = tensor_name.split('.' ) for split in splits[:-1]: _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) _lowercase : Optional[int] = new_module _lowercase : List[Any] = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) _lowercase : Optional[int] = tensor_name in module._buffers _lowercase : Any = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None: raise ValueError(F"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) _lowercase : Dict = False _lowercase : Dict = False if is_buffer or not is_bitsandbytes_available(): _lowercase : Any = False _lowercase : List[str] = False else: _lowercase : str = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) _lowercase : List[str] = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: _lowercase : Dict = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _lowercase : Optional[int] = old_value.to(SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): _lowercase : int = value.to('cpu' ) if value.dtype == torch.inta: _lowercase : Optional[Any] = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse( '0.37.2' ) if not is_abit_serializable: raise ValueError( 'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ' 'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' ) else: _lowercase : Any = torch.tensor(SCREAMING_SNAKE_CASE , device='cpu' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , SCREAMING_SNAKE_CASE ) and fpaa_statistics is None: _lowercase : Optional[Any] = new_value.T _lowercase : Tuple = old_value.__dict__ if is_abit: _lowercase : Any = bnb.nn.IntaParams(SCREAMING_SNAKE_CASE , requires_grad=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) elif is_abit: _lowercase : List[str] = bnb.nn.Paramsabit(SCREAMING_SNAKE_CASE , requires_grad=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : Dict = new_value if fpaa_statistics is not None: setattr(module.weight , 'SCB' , fpaa_statistics.to(SCREAMING_SNAKE_CASE ) ) else: if value is None: _lowercase : List[str] = old_value.to(SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): _lowercase : Tuple = value.to(SCREAMING_SNAKE_CASE ) else: _lowercase : List[str] = torch.tensor(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE ) if is_buffer: _lowercase : List[str] = new_value else: _lowercase : Any = nn.Parameter(SCREAMING_SNAKE_CASE , requires_grad=old_value.requires_grad ) _lowercase : List[str] = new_value def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False ) -> Optional[Any]: for name, module in model.named_children(): if current_key_name is None: _lowercase : Any = [] current_key_name.append(SCREAMING_SNAKE_CASE ) if (isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '.'.join(SCREAMING_SNAKE_CASE ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase , _lowercase : str = module.weight.shape else: _lowercase : List[str] = module.in_features _lowercase : Optional[int] = module.out_features if quantization_config.quantization_method() == "llm_int8": _lowercase : Dict = bnb.nn.LinearabitLt( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) _lowercase : List[Any] = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _lowercase : str = bnb.nn.Linearabit( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) _lowercase : str = True # Store the module class in case we need to transpose the weight later _lowercase : List[Any] = type(SCREAMING_SNAKE_CASE ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(SCREAMING_SNAKE_CASE ) if len(list(module.children() ) ) > 0: _lowercase , _lowercase : Any = _replace_with_bnb_linear( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , has_been_replaced=SCREAMING_SNAKE_CASE , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str: _lowercase : Optional[Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert _lowercase , _lowercase : Dict = _replace_with_bnb_linear( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: warnings.warn( '`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , SCREAMING_SNAKE_CASE , ) return replace_with_bnb_linear(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __magic_name__ ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: warnings.warn( '`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , SCREAMING_SNAKE_CASE , ) return set_module_quantized_tensor_to_device(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: _lowercase : Tuple = deepcopy(SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _lowercase : Dict = find_tied_parameters(SCREAMING_SNAKE_CASE ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _lowercase : Optional[Any] = sum(SCREAMING_SNAKE_CASE , [] ) _lowercase : List[Any] = len(SCREAMING_SNAKE_CASE ) > 0 # Check if it is a base model _lowercase : Optional[Any] = not hasattr(SCREAMING_SNAKE_CASE , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _lowercase : Optional[Any] = list(model.named_children() ) _lowercase : Any = [list_modules[-1][0]] # add last module together with tied weights _lowercase : List[Any] = set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = list(set(SCREAMING_SNAKE_CASE ) ) + list(SCREAMING_SNAKE_CASE ) # remove ".weight" from the keys _lowercase : Union[str, Any] = ['.weight', '.bias'] _lowercase : Optional[int] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _lowercase : int = name.replace(SCREAMING_SNAKE_CASE , '' ) filtered_module_names.append(SCREAMING_SNAKE_CASE ) return filtered_module_names
701
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = {} _lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids'] _lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
677
0
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate _lowercase : str = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly _lowercase : str = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
702
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration} UpperCamelCase = {"facebook/bart-base": BartTokenizer} def __magic_name__ ( ) -> str: _lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' ) _lowercase : Optional[Any] = parser.parse_args() return args def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]: _lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: _lowercase : Dict = 0 _lowercase : Optional[int] = None _lowercase : Union[str, Any] = 0 return huggingface_model, tokenizer def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: model.eval() _lowercase : List[Any] = None _lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): _lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.' _lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device ) _lowercase : str = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=SCREAMING_SNAKE_CASE , ) logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) ) logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = ort_sess.run( SCREAMING_SNAKE_CASE , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(SCREAMING_SNAKE_CASE ), 'max_length': np.array(SCREAMING_SNAKE_CASE ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def __magic_name__ ( ) -> Any: _lowercase : Dict = parse_args() _lowercase : Union[str, Any] = 5 _lowercase : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _lowercase : Optional[Any] = torch.device(args.device ) _lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(SCREAMING_SNAKE_CASE ) if args.max_length: _lowercase : Any = args.max_length if args.num_beams: _lowercase : List[str] = args.num_beams if args.output_file_path: _lowercase : Union[str, Any] = args.output_file_path else: _lowercase : Tuple = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter UpperCamelCase = "Create a default config file for Accelerate with only a few flags set." def __magic_name__ ( SCREAMING_SNAKE_CASE="no" , SCREAMING_SNAKE_CASE = default_json_config_file , SCREAMING_SNAKE_CASE = False ) -> str: '''simple docstring''' _lowercase : Optional[Any] = Path(SCREAMING_SNAKE_CASE_ ) path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ ) if path.exists(): print( F"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" ) return False _lowercase : Tuple = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F"""`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}""" ) _lowercase : Any = { 'compute_environment': 'LOCAL_MACHINE', 'mixed_precision': mixed_precision, } if torch.cuda.is_available(): _lowercase : Union[str, Any] = torch.cuda.device_count() _lowercase : int = num_gpus _lowercase : Any = False if num_gpus > 1: _lowercase : Dict = 'MULTI_GPU' else: _lowercase : Any = 'NO' elif is_xpu_available() and use_xpu: _lowercase : Tuple = torch.xpu.device_count() _lowercase : int = num_xpus _lowercase : Any = False if num_xpus > 1: _lowercase : Dict = 'MULTI_XPU' else: _lowercase : Dict = 'NO' elif is_npu_available(): _lowercase : int = torch.npu.device_count() _lowercase : Optional[int] = num_npus _lowercase : List[str] = False if num_npus > 1: _lowercase : Union[str, Any] = 'MULTI_NPU' else: _lowercase : int = 'NO' else: _lowercase : str = 0 _lowercase : List[Any] = True _lowercase : Optional[int] = 1 _lowercase : List[str] = 'NO' _lowercase : List[str] = ClusterConfig(**SCREAMING_SNAKE_CASE_ ) config.to_json_file(SCREAMING_SNAKE_CASE_ ) return path def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' _lowercase : int = parser.add_parser('default' , parents=SCREAMING_SNAKE_CASE_ , help=SCREAMING_SNAKE_CASE_ , formatter_class=SCREAMING_SNAKE_CASE_ ) parser.add_argument( '--config_file' , default=SCREAMING_SNAKE_CASE_ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , dest='save_location' , ) parser.add_argument( '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=SCREAMING_SNAKE_CASE_ , help='Whether or not to use mixed precision training. ' 'Choose between FP16 and BF16 (bfloat16) training. ' 'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , ) parser.set_defaults(func=SCREAMING_SNAKE_CASE_ ) return parser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' _lowercase : Any = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F"""accelerate configuration saved at {config_file}""" )
703
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCamelCase : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): _lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): _lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Optional[Any] = parent _lowercase : str = batch_size _lowercase : Optional[int] = seq_length _lowercase : Tuple = is_training _lowercase : List[Any] = use_input_mask _lowercase : Optional[Any] = use_token_type_ids _lowercase : Any = use_labels _lowercase : str = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Tuple = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Tuple = max_position_embeddings _lowercase : List[str] = type_vocab_size _lowercase : Optional[Any] = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : List[str] = num_labels _lowercase : Union[str, Any] = num_choices _lowercase : List[str] = scope _lowercase : Union[str, Any] = embedding_size def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Optional[int] = None if self.use_input_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Dict = None _lowercase : Any = None _lowercase : int = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase ) _lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) _lowercase : Tuple = [input_ids, input_mask] _lowercase : str = model(_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Optional[int] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase ) _lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = self.num_labels _lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase ) _lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_choices _lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : str = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.num_labels _lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : int = config_and_inputs _lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def __a ( self ): _lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self ) _lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase ) @slow def __a ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' ) _lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowercase : List[str] = model(_lowerCAmelCase )[0] _lowercase : str = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , _lowerCAmelCase ) _lowercase : List[Any] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
677
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } UpperCamelCase = { "gpt-neox-20b": 2_048, } class lowerCAmelCase_ ( __A ): _UpperCamelCase : List[Any] = VOCAB_FILES_NAMES _UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"] def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase="<|endoftext|>" , _lowerCAmelCase=False , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , add_prefix_space=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _lowerCAmelCase ) != add_prefix_space: _lowercase : List[Any] = getattr(_lowerCAmelCase , pre_tok_state.pop('type' ) ) _lowercase : int = add_prefix_space _lowercase : List[Any] = pre_tok_class(**_lowerCAmelCase ) _lowercase : int = add_prefix_space def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Dict = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase ): _lowercase : int = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) + [self.eos_token_id] ) if len(_lowerCAmelCase ) > self.model_max_length: _lowercase : int = input_ids[-self.model_max_length :] return input_ids
704
import qiskit def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts: _lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
677
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase = { "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"], "tokenization_perceiver": ["PerceiverTokenizer"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["PerceiverFeatureExtractor"] UpperCamelCase = ["PerceiverImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST", "PerceiverForImageClassificationConvProcessing", "PerceiverForImageClassificationFourier", "PerceiverForImageClassificationLearned", "PerceiverForMaskedLM", "PerceiverForMultimodalAutoencoding", "PerceiverForOpticalFlow", "PerceiverForSequenceClassification", "PerceiverLayer", "PerceiverModel", "PerceiverPreTrainedModel", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict: if attention_mask is None: _lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ): _lowercase : List[str] = parent _lowercase : List[Any] = batch_size _lowercase : Optional[Any] = seq_length _lowercase : Optional[Any] = is_training _lowercase : Tuple = use_labels _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Any = hidden_act _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = eos_token_id _lowercase : int = pad_token_id _lowercase : Tuple = bos_token_id _lowercase : List[Any] = initializer_range def __a ( self ): _lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Tuple = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , ) _lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = 2_0 _lowercase : List[Any] = model_class_name(_lowerCAmelCase ) _lowercase : List[Any] = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : int = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : int = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , ) _lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = 2_0 _lowercase : Any = model_class_name(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : Optional[int] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : List[Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase ) _lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Tuple = 99 def __a ( self ): _lowercase : Dict = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) _lowercase : Union[str, Any] = input_ids.shape[0] _lowercase : Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __a ( self ): _lowercase , _lowercase , _lowercase : int = self._get_config_and_data() _lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase ) _lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) _lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) _lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) _lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) _lowercase : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) _lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ): _UpperCamelCase : int = True _UpperCamelCase : Any = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __a ( self ): _lowercase : List[str] = FlaxBlenderbotSmallModelTester(self ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = model_class(_lowerCAmelCase ) @jax.jit def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ): return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) with self.subTest('JIT Enabled' ): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __a ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : int = model_class(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _lowercase : List[Any] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return model.decode( decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , ) with self.subTest('JIT Enabled' ): _lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __a ( self ): for model_class_name in self.all_model_classes: _lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id _lowercase : int = model(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase )
677
0
'''simple docstring''' class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = name _lowercase : List[Any] = val def __str__( self ): return F"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self , _lowerCAmelCase ): return self.val < other.val class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = {} _lowercase : List[Any] = {} _lowercase : str = self.build_heap(snake_case__ ) def __getitem__( self , _lowerCAmelCase ): return self.get_value(snake_case__ ) def __a ( self , _lowerCAmelCase ): return (idx - 1) // 2 def __a ( self , _lowerCAmelCase ): return idx * 2 + 1 def __a ( self , _lowerCAmelCase ): return idx * 2 + 2 def __a ( self , _lowerCAmelCase ): return self.heap_dict[key] def __a ( self , _lowerCAmelCase ): _lowercase : Optional[Any] = len(snake_case__ ) - 1 _lowercase : Union[str, Any] = self.get_parent_idx(snake_case__ ) for idx, i in enumerate(snake_case__ ): _lowercase : str = idx _lowercase : Dict = i.val for i in range(snake_case__ , -1 , -1 ): self.sift_down(snake_case__ , snake_case__ ) return array def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): while True: _lowercase : Optional[Any] = self.get_left_child_idx(snake_case__ ) # noqa: E741 _lowercase : Tuple = self.get_right_child_idx(snake_case__ ) _lowercase : str = idx if l < len(snake_case__ ) and array[l] < array[idx]: _lowercase : Union[str, Any] = l if r < len(snake_case__ ) and array[r] < array[smallest]: _lowercase : int = r if smallest != idx: _lowercase , _lowercase : List[Any] = array[smallest], array[idx] ( ( _lowercase ) , ( _lowercase ) , ) : List[str] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) _lowercase : int = smallest else: break def __a ( self , _lowerCAmelCase ): _lowercase : str = self.get_parent_idx(snake_case__ ) while p >= 0 and self.heap[p] > self.heap[idx]: _lowercase , _lowercase : List[Any] = self.heap[idx], self.heap[p] _lowercase , _lowercase : Optional[Any] = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) _lowercase : List[str] = p _lowercase : Dict = self.get_parent_idx(snake_case__ ) def __a ( self ): return self.heap[0] def __a ( self ): _lowercase , _lowercase : Dict = self.heap[-1], self.heap[0] _lowercase , _lowercase : int = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) _lowercase : List[str] = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def __a ( self , _lowerCAmelCase ): self.heap.append(snake_case__ ) _lowercase : List[str] = len(self.heap ) - 1 _lowercase : int = node.val self.sift_up(len(self.heap ) - 1 ) def __a ( self ): return len(self.heap ) == 0 def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" _lowercase : int = new_value _lowercase : List[str] = new_value self.sift_up(self.idx_of_element[node] ) UpperCamelCase = Node("R", -1) UpperCamelCase = Node("B", 6) UpperCamelCase = Node("A", 3) UpperCamelCase = Node("X", 1) UpperCamelCase = Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array UpperCamelCase = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
706
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Dict = "longformer" def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : Optional[int] = attention_window _lowercase : str = sep_token_id _lowercase : Optional[Any] = bos_token_id _lowercase : List[Any] = eos_token_id _lowercase : Optional[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : List[str] = hidden_act _lowercase : List[str] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : List[str] = onnx_export class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = True @property def __a ( self ): if self.task == "multiple-choice": _lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : int = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def __a ( self ): _lowercase : Optional[int] = super().outputs if self.task == "default": _lowercase : List[str] = {0: 'batch'} return outputs @property def __a ( self ): return 1E-4 @property def __a ( self ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 1_4 ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : int = super().generate_dummy_inputs( preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _lowercase : str = torch.zeros_like(inputs['input_ids'] ) # make every second token global _lowercase : Any = 1 return inputs
677
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCamelCase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["GPTSw3Tokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
707
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
677
0
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : int = parent _lowercase : Union[str, Any] = batch_size _lowercase : Union[str, Any] = seq_length _lowercase : Dict = is_training _lowercase : Tuple = use_input_mask _lowercase : int = use_token_type_ids _lowercase : Tuple = use_labels _lowercase : Any = vocab_size _lowercase : str = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Dict = num_attention_heads _lowercase : Any = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Tuple = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : List[str] = max_position_embeddings _lowercase : Union[str, Any] = type_vocab_size _lowercase : str = type_sequence_label_size _lowercase : Optional[int] = initializer_range _lowercase : Optional[int] = num_labels _lowercase : int = num_choices _lowercase : Dict = scope def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Any = None if self.use_input_mask: _lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : List[str] = None _lowercase : Dict = None _lowercase : Optional[int] = None if self.use_labels: _lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = DistilBertModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = DistilBertForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = DistilBertForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = self.num_labels _lowercase : Dict = DistilBertForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = self.num_labels _lowercase : Union[str, Any] = DistilBertForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Optional[int] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_choices _lowercase : int = DistilBertForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowercase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowercase : List[str] = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Tuple = config_and_inputs _lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _UpperCamelCase : Optional[Any] = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Tuple = True _UpperCamelCase : List[Any] = True _UpperCamelCase : Optional[int] = True _UpperCamelCase : List[Any] = True def __a ( self ): _lowercase : int = DistilBertModelTester(self ) _lowercase : Tuple = ConfigTester(self , config_class=_lowerCAmelCase , dim=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = DistilBertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @slow @require_torch_gpu def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return _lowercase : Any = True _lowercase : Optional[int] = model_class(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = torch.jit.trace( _lowerCAmelCase , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'traced_model.pt' ) ) _lowercase : Optional[Any] = torch.jit.load(os.path.join(_lowerCAmelCase , 'traced_model.pt' ) , map_location=_lowerCAmelCase ) loaded(inputs_dict['input_ids'].to(_lowerCAmelCase ) , inputs_dict['attention_mask'].to(_lowerCAmelCase ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : int = DistilBertModel.from_pretrained('distilbert-base-uncased' ) _lowercase : List[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) _lowercase : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _lowercase : List[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0] _lowercase : Any = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , _lowerCAmelCase ) _lowercase : List[str] = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
708
import math def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list: _lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : Dict = i _lowercase : str = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _lowercase : Optional[Any] = array[temp_index - 1] temp_index -= 1 _lowercase : Optional[Any] = temp_index_value return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap _lowercase : List[str] = index _lowercase : List[str] = 2 * index + 1 # Left Node _lowercase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _lowercase : Any = left_index if right_index < heap_size and array[largest] < array[right_index]: _lowercase : str = right_index if largest != index: _lowercase , _lowercase : List[str] = array[largest], array[index] heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: _lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _lowercase , _lowercase : List[Any] = array[0], array[i] heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE ) return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[Any] = low _lowercase : Tuple = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _lowercase , _lowercase : Tuple = array[j], array[i] i += 1 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: if len(SCREAMING_SNAKE_CASE ) == 0: return array _lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) ) _lowercase : str = 16 return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(SCREAMING_SNAKE_CASE ) max_depth -= 1 _lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = p return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input("Enter numbers separated by a comma : ").strip() UpperCamelCase = [float(item) for item in user_input.split(",")] print(sort(unsorted))
677
0
import math def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[int]: _lowercase : Dict = [] _lowercase : Tuple = 2 _lowercase : List[str] = int(math.sqrt(_A ) ) # Size of every segment _lowercase : Union[str, Any] = [True] * (end + 1) _lowercase : Any = [] while start <= end: if temp[start] is True: in_prime.append(_A ) for i in range(start * start , end + 1 , _A ): _lowercase : Optional[int] = False start += 1 prime += in_prime _lowercase : Dict = end + 1 _lowercase : Dict = min(2 * end , _A ) while low <= n: _lowercase : str = [True] * (high - low + 1) for each in in_prime: _lowercase : List[str] = math.floor(low / each ) * each if t < low: t += each for j in range(_A , high + 1 , _A ): _lowercase : Any = False for j in range(len(_A ) ): if temp[j] is True: prime.append(j + low ) _lowercase : Tuple = high + 1 _lowercase : Optional[Any] = min(high + end , _A ) return prime print(sieve(10**6))
709
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPFeatureExtractor"] UpperCamelCase = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel UpperCamelCase = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48_000, 'sample_size': 65_536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48_000, 'sample_size': 65_536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48_000, 'sample_size': 131_072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16_000, 'sample_size': 65_536, }, } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: return torch.atana(A_ , A_ ) / math.pi * 2 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: _lowercase : Tuple = torch.sin(t * math.pi / 2 ) ** 2 _lowercase : Dict = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_ , A_ ) class lowerCAmelCase_ ( _A ): pass class lowerCAmelCase_ ( nn.Module ): def __init__( self , _lowerCAmelCase ): super().__init__() _lowercase : Any = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) _lowercase : str = deepcopy(self.diffusion ) _lowercase : str = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: _lowercase : List[Any] = MODELS_MAP[model_name]['url'] os.system(F"""wget {url} ./""" ) return F"""./{model_name}.ckpt""" UpperCamelCase = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } UpperCamelCase = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } UpperCamelCase = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } UpperCamelCase = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } UpperCamelCase = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } UpperCamelCase = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if name.startswith('skip' ): return name.replace('skip' , RES_CONV_MAP['skip'] ) # name has to be of format main.{digit} if not name.startswith('main.' ): raise ValueError(F"""ResConvBlock error with {name}""" ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_ , A_ ): return name.replace(A_ , A_ ) elif name.startswith(A_ ): return [name.replace(A_ , A_ ) for v in value] raise ValueError(F"""Attn error with {name}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 ) -> Union[str, Any]: _lowercase : List[str] = input_string if string.split('.' )[0] == "timestep_embed": return string.replace('timestep_embed' , 'time_proj' ) _lowercase : Any = 0 if string.startswith('net.3.' ): depth += 1 _lowercase : Any = string[6:] elif string.startswith('net.' ): _lowercase : List[str] = string[4:] while string.startswith('main.7.' ): depth += 1 _lowercase : Any = string[7:] if string.startswith('main.' ): _lowercase : Optional[int] = string[5:] # mid block if string[:2].isdigit(): _lowercase : int = string[:2] _lowercase : List[Any] = string[2:] else: _lowercase : Union[str, Any] = string[0] _lowercase : str = string[1:] if depth == max_depth: _lowercase : Tuple = MID_NUM_TO_LAYER[layer_num] _lowercase : Union[str, Any] = 'mid_block' elif depth > 0 and int(A_ ) < 7: _lowercase : Union[str, Any] = DOWN_NUM_TO_LAYER[layer_num] _lowercase : int = F"""down_blocks.{depth}""" elif depth > 0 and int(A_ ) > 7: _lowercase : str = UP_NUM_TO_LAYER[layer_num] _lowercase : Union[str, Any] = F"""up_blocks.{max_depth - depth - 1}""" elif depth == 0: _lowercase : List[Any] = DEPTH_0_TO_LAYER[layer_num] _lowercase : int = F"""up_blocks.{max_depth - 1}""" if int(A_ ) > 3 else 'down_blocks.0' if not string_left.startswith('.' ): raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" ) _lowercase : int = string_left[1:] if "resnets" in new_layer: _lowercase : int = convert_resconv_naming(A_ ) elif "attentions" in new_layer: _lowercase : Union[str, Any] = convert_attn_naming(A_ ) _lowercase : List[Any] = new_string_left if not isinstance(A_ , A_ ): _lowercase : Dict = prefix + '.' + new_layer + '.' + string_left else: _lowercase : Union[str, Any] = [prefix + '.' + new_layer + '.' + s for s in string_left] return new_string def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Optional[int] = {} for k, v in state_dict.items(): if k.endswith('kernel' ): # up- and downsample layers, don't have trainable weights continue _lowercase : int = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_ , A_ ): _lowercase : Any = transform_conv_attns(A_ , A_ , A_ ) else: _lowercase : Tuple = v return new_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: if len(A_ ) == 1: if len(v.shape ) == 3: # weight _lowercase : Tuple = v[:, :, 0] else: # bias _lowercase : Any = v else: # qkv matrices _lowercase : str = v.shape[0] _lowercase : str = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _lowercase : Any = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _lowercase : int = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _lowercase : int = args.model_path.split('/' )[-1].split('.' )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}""" _lowercase : str = download(A_ ) _lowercase : Optional[int] = MODELS_MAP[model_name]['sample_rate'] _lowercase : Any = MODELS_MAP[model_name]['sample_size'] _lowercase : List[str] = Object() _lowercase : Any = sample_size _lowercase : int = sample_rate _lowercase : Any = 0 _lowercase : Optional[int] = UNetaDModel(sample_size=A_ , sample_rate=A_ ) _lowercase : Union[str, Any] = diffusers_model.state_dict() _lowercase : Optional[Any] = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=A_ )['state_dict'] ) _lowercase : str = orig_model.diffusion_ema.eval() _lowercase : Any = orig_model.state_dict() _lowercase : List[Any] = rename_orig_weights(A_ ) _lowercase : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _lowercase : str = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, F"""Problem with {renamed_minus_diffusers}""" assert all(k.endswith('kernel' ) for k in list(A_ ) ), F"""Problem with {diffusers_minus_renamed}""" for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F"""Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}""" if key == "time_proj.weight": _lowercase : Optional[int] = value.squeeze() _lowercase : Union[str, Any] = value diffusers_model.load_state_dict(A_ ) _lowercase : Any = 100 _lowercase : List[Any] = 33 _lowercase : Optional[Any] = IPNDMScheduler(num_train_timesteps=A_ ) _lowercase : Dict = torch.manual_seed(A_ ) _lowercase : str = torch.randn([1, 2, config.sample_size] , generator=A_ ).to(A_ ) _lowercase : Optional[int] = torch.linspace(1 , 0 , steps + 1 , device=A_ )[:-1] _lowercase : Optional[Any] = get_crash_schedule(A_ ) _lowercase : Dict = DanceDiffusionPipeline(unet=A_ , scheduler=A_ ) _lowercase : Optional[int] = torch.manual_seed(33 ) _lowercase : Tuple = pipe(num_inference_steps=A_ , generator=A_ ).audios _lowercase : Optional[int] = sampling.iplms_sample(A_ , A_ , A_ , {} ) _lowercase : Optional[Any] = generated.clamp(-1 , 1 ) _lowercase : Optional[int] = (generated - audio).abs().sum() _lowercase : Union[str, Any] = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print('Diff sum' , A_ ) print('Diff max' , A_ ) assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/""" print(F"""Conversion for {model_name} successful!""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase = parser.parse_args() main(args)
710
from collections.abc import Sequence def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: _lowercase : Optional[Any] = 0.0 for coeff in reversed(SCREAMING_SNAKE_CASE ): _lowercase : Optional[int] = result * x + coeff return result if __name__ == "__main__": UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCamelCase = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
677
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : int = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowercase : Any = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 ) _lowercase : Any = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): for example in examples: _lowercase : Optional[Any] = video_classifier(_A ) self.assertEqual( _A , [ {'score': ANY(_A ), 'label': ANY(_A )}, {'score': ANY(_A ), 'label': ANY(_A )}, ] , ) @require_torch def __a ( self ): _lowercase : Dict = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' _lowercase : Optional[Any] = VideoMAEFeatureExtractor( size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} ) _lowercase : List[str] = pipeline( 'video-classification' , model=_A , feature_extractor=_A , frame_sampling_rate=4 ) _lowercase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) _lowercase : Optional[int] = video_classifier(_A , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) _lowercase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def __a ( self ): pass
711
from __future__ import annotations class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase=None ): _lowercase : int = data _lowercase : Union[str, Any] = None def __repr__( self ): _lowercase : Dict = [] _lowercase : Tuple = self while temp: string_rep.append(F"""{temp.data}""" ) _lowercase : Optional[Any] = temp.next return "->".join(_lowerCAmelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: if not elements_list: raise Exception('The Elements List is empty' ) _lowercase : Union[str, Any] = Node(elements_list[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): _lowercase : Optional[int] = Node(elements_list[i] ) _lowercase : List[Any] = current.next return head def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None: if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __magic_name__ ( ) -> List[str]: from doctest import testmod testmod() _lowercase : int = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# UpperCamelCase : int = [ # (stable-diffusion, HF Diffusers) ("time_embed.0.weight", "time_embedding.linear_1.weight"), ("time_embed.0.bias", "time_embedding.linear_1.bias"), ("time_embed.2.weight", "time_embedding.linear_2.weight"), ("time_embed.2.bias", "time_embedding.linear_2.bias"), ("input_blocks.0.0.weight", "conv_in.weight"), ("input_blocks.0.0.bias", "conv_in.bias"), ("out.0.weight", "conv_norm_out.weight"), ("out.0.bias", "conv_norm_out.bias"), ("out.2.weight", "conv_out.weight"), ("out.2.bias", "conv_out.bias"), ] UpperCamelCase : str = [ # (stable-diffusion, HF Diffusers) ("in_layers.0", "norm1"), ("in_layers.2", "conv1"), ("out_layers.0", "norm2"), ("out_layers.3", "conv2"), ("emb_layers.1", "time_emb_proj"), ("skip_connection", "conv_shortcut"), ] UpperCamelCase : List[str] = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks UpperCamelCase : str = f'''down_blocks.{i}.resnets.{j}.''' UpperCamelCase : Union[str, Any] = f'''input_blocks.{3*i + j + 1}.0.''' unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 UpperCamelCase : List[str] = f'''down_blocks.{i}.attentions.{j}.''' UpperCamelCase : Optional[Any] = f'''input_blocks.{3*i + j + 1}.1.''' unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks UpperCamelCase : Tuple = f'''up_blocks.{i}.resnets.{j}.''' UpperCamelCase : Any = f'''output_blocks.{3*i + j}.0.''' unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 UpperCamelCase : Any = f'''up_blocks.{i}.attentions.{j}.''' UpperCamelCase : Union[str, Any] = f'''output_blocks.{3*i + j}.1.''' unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 UpperCamelCase : Any = f'''down_blocks.{i}.downsamplers.0.conv.''' UpperCamelCase : Any = f'''input_blocks.{3*(i+1)}.0.op.''' unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 UpperCamelCase : Optional[Any] = f'''up_blocks.{i}.upsamplers.0.''' UpperCamelCase : Tuple = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.''' unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) UpperCamelCase : Optional[Any] = "mid_block.attentions.0." UpperCamelCase : Any = "middle_block.1." unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): UpperCamelCase : Optional[Any] = f'''mid_block.resnets.{j}.''' UpperCamelCase : int = f'''middle_block.{2*j}.''' unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: _lowercase : Dict = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _lowercase : Tuple = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _lowercase : Optional[int] = v.replace(__snake_case , __snake_case ) _lowercase : int = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _lowercase : List[Any] = v.replace(__snake_case , __snake_case ) _lowercase : str = v _lowercase : List[Any] = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# UpperCamelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("nin_shortcut", "conv_shortcut"), ("norm_out", "conv_norm_out"), ("mid.attn_1.", "mid_block.attentions.0."), ] for i in range(4): # down_blocks have two resnets for j in range(2): UpperCamelCase : Union[str, Any] = f'''encoder.down_blocks.{i}.resnets.{j}.''' UpperCamelCase : Optional[Any] = f'''encoder.down.{i}.block.{j}.''' vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: UpperCamelCase : Any = f'''down_blocks.{i}.downsamplers.0.''' UpperCamelCase : List[str] = f'''down.{i}.downsample.''' vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) UpperCamelCase : Tuple = f'''up_blocks.{i}.upsamplers.0.''' UpperCamelCase : Union[str, Any] = f'''up.{3-i}.upsample.''' vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): UpperCamelCase : Optional[Any] = f'''decoder.up_blocks.{i}.resnets.{j}.''' UpperCamelCase : Dict = f'''decoder.up.{3-i}.block.{j}.''' vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): UpperCamelCase : Any = f'''mid_block.resnets.{i}.''' UpperCamelCase : Dict = f'''mid.block_{i+1}.''' vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) UpperCamelCase : Dict = [ # (stable-diffusion, HF Diffusers) ("norm.", "group_norm."), ("q.", "query."), ("k.", "key."), ("v.", "value."), ("proj_out.", "proj_attn."), ] def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return w.reshape(*w.shape , 1 , 1 ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : str = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: _lowercase : Union[str, Any] = v.replace(__snake_case , __snake_case ) _lowercase : str = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: _lowercase : Optional[Any] = v.replace(__snake_case , __snake_case ) _lowercase : int = v _lowercase : int = {v: vae_state_dict[k] for k, v in mapping.items()} _lowercase : Any = ['q', 'k', 'v', 'proj_out'] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F"""mid.attn_1.{weight_name}.weight""" in k: print(F"""Reshaping {k} for SD format""" ) _lowercase : List[Any] = reshape_weight_for_sd(__snake_case ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# UpperCamelCase : Optional[Any] = [ # (stable-diffusion, HF Diffusers) ("resblocks.", "text_model.encoder.layers."), ("ln_1", "layer_norm1"), ("ln_2", "layer_norm2"), (".c_fc.", ".fc1."), (".c_proj.", ".fc2."), (".attn", ".self_attn"), ("ln_final.", "transformer.text_model.final_layer_norm."), ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), ] UpperCamelCase : List[str] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} UpperCamelCase : Optional[Any] = re.compile("|".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp UpperCamelCase : Dict = {"q": 0, "k": 1, "v": 2} def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : str = {} _lowercase : Optional[Any] = {} _lowercase : Optional[int] = {} for k, v in text_enc_dict.items(): if ( k.endswith('.self_attn.q_proj.weight' ) or k.endswith('.self_attn.k_proj.weight' ) or k.endswith('.self_attn.v_proj.weight' ) ): _lowercase : Tuple = k[: -len('.q_proj.weight' )] _lowercase : Dict = k[-len('q_proj.weight' )] if k_pre not in capture_qkv_weight: _lowercase : Tuple = [None, None, None] _lowercase : Union[str, Any] = v continue if ( k.endswith('.self_attn.q_proj.bias' ) or k.endswith('.self_attn.k_proj.bias' ) or k.endswith('.self_attn.v_proj.bias' ) ): _lowercase : str = k[: -len('.q_proj.bias' )] _lowercase : int = k[-len('q_proj.bias' )] if k_pre not in capture_qkv_bias: _lowercase : Tuple = [None, None, None] _lowercase : Optional[int] = v continue _lowercase : List[Any] = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , __snake_case ) _lowercase : Optional[int] = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) _lowercase : str = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , __snake_case ) _lowercase : List[str] = torch.cat(__snake_case ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' ) _lowercase : str = textenc_pattern.sub(lambda SCREAMING_SNAKE_CASE : protected[re.escape(m.group(0 ) )] , __snake_case ) _lowercase : str = torch.cat(__snake_case ) return new_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: return text_enc_dict if __name__ == "__main__": UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt." ) UpperCamelCase : List[str] = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" # Path for safetensors UpperCamelCase : List[str] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors") UpperCamelCase : Dict = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors") UpperCamelCase : Dict = osp.join(args.model_path, "text_encoder", "model.safetensors") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): UpperCamelCase : Optional[int] = load_file(unet_path, device="cpu") else: UpperCamelCase : Optional[Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin") UpperCamelCase : Optional[int] = torch.load(unet_path, map_location="cpu") if osp.exists(vae_path): UpperCamelCase : List[Any] = load_file(vae_path, device="cpu") else: UpperCamelCase : List[str] = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin") UpperCamelCase : Dict = torch.load(vae_path, map_location="cpu") if osp.exists(text_enc_path): UpperCamelCase : Optional[Any] = load_file(text_enc_path, device="cpu") else: UpperCamelCase : str = osp.join(args.model_path, "text_encoder", "pytorch_model.bin") UpperCamelCase : Optional[int] = torch.load(text_enc_path, map_location="cpu") # Convert the UNet model UpperCamelCase : int = convert_unet_state_dict(unet_state_dict) UpperCamelCase : List[Any] = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()} # Convert the VAE model UpperCamelCase : List[str] = convert_vae_state_dict(vae_state_dict) UpperCamelCase : str = {"first_stage_model." + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper UpperCamelCase : Optional[int] = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm UpperCamelCase : Dict = {"transformer." + k: v for k, v in text_enc_dict.items()} UpperCamelCase : Dict = convert_text_enc_state_dict_vaa(text_enc_dict) UpperCamelCase : List[str] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()} else: UpperCamelCase : Dict = convert_text_enc_state_dict(text_enc_dict) UpperCamelCase : int = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint UpperCamelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: UpperCamelCase : Any = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: UpperCamelCase : Dict = {"state_dict": state_dict} torch.save(state_dict, args.checkpoint_path)
712
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2) if __name__ == "__main__": def __magic_name__ ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
677
0
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", "merges_file": "merges.txt", } UpperCamelCase = { "vocab_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json" ), }, "tokenizer_config_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json" ), }, "merges_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt" ), }, } UpperCamelCase = "</w>" UpperCamelCase = "@@ " def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Optional[int] = set() _lowercase : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowercase : Optional[Any] = char return pairs # Speech2Text2 has no max input length UpperCamelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024} class lowerCAmelCase_ ( lowercase_ ): _UpperCamelCase : Tuple = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : str = ["input_ids", "attention_mask"] def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase=False , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , **lowerCamelCase_ , ) _lowercase : Union[str, Any] = do_lower_case with open(lowerCamelCase_ , encoding='utf-8' ) as vocab_handle: _lowercase : Optional[int] = json.load(lowerCamelCase_ ) _lowercase : int = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" ) _lowercase : Any = None _lowercase : Optional[Any] = None else: with open(lowerCamelCase_ , encoding='utf-8' ) as merges_handle: _lowercase : Optional[int] = merges_handle.read().split('\n' )[:-1] _lowercase : int = [tuple(merge.split()[:2] ) for merge in merges] _lowercase : str = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) _lowercase : List[str] = {} @property def __a ( self ): return len(self.decoder ) def __a ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self , _lowerCAmelCase ): _lowercase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _lowercase : Dict = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: _lowercase : Optional[int] = min(lowerCamelCase_ , key=lambda _lowerCAmelCase : self.bpe_ranks.get(lowerCamelCase_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _lowercase : List[Any] = bigram _lowercase : Any = [] _lowercase : Optional[int] = 0 while i < len(lowerCamelCase_ ): try: _lowercase : List[str] = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowercase : Union[str, Any] = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowercase : List[Any] = tuple(lowerCamelCase_ ) _lowercase : Optional[int] = new_word if len(lowerCamelCase_ ) == 1: break else: _lowercase : Any = get_pairs(lowerCamelCase_ ) _lowercase : Dict = """ """.join(lowerCamelCase_ ) if word == "\n " + BPE_TOKEN_MERGES: _lowercase : List[str] = """\n""" + BPE_TOKEN_MERGES if word.endswith(lowerCamelCase_ ): _lowercase : Tuple = word.replace(lowerCamelCase_ , '' ) _lowercase : Optional[Any] = word.replace(' ' , lowerCamelCase_ ) _lowercase : int = word return word def __a ( self , _lowerCAmelCase ): if self.bpe_ranks is None: raise ValueError( 'This tokenizer was instantiated without a `merges.txt` file, so' ' that it can only be used for decoding, not for encoding.' 'Make sure to provide `merges.txt` file at instantiation to enable ' 'encoding.' ) if self.do_lower_case: _lowercase : Any = text.lower() _lowercase : Any = text.split() _lowercase : Optional[int] = [] for token in text: if token: split_tokens.extend(list(self.bpe(lowerCamelCase_ ).split(' ' ) ) ) return split_tokens def __a ( self , _lowerCAmelCase ): return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def __a ( self , _lowerCAmelCase ): _lowercase : List[str] = self.decoder.get(lowerCamelCase_ , self.unk_token ) return result def __a ( self , _lowerCAmelCase ): _lowercase : int = """ """.join(lowerCamelCase_ ) # make sure @@ tokens are concatenated _lowercase : Optional[int] = """""".join(string.split(lowerCamelCase_ ) ) return string def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not os.path.isdir(lowerCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowercase : List[str] = os.path.join( lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _lowercase : int = os.path.join( lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '\n' ) _lowercase : Union[str, Any] = 0 if self.bpe_ranks is None: return (vocab_file,) with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) _lowercase : Any = token_index writer.write(' '.join(lowerCamelCase_ ) + '\n' ) index += 1 return (vocab_file, merges_file)
713
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase = 16 UpperCamelCase = 32 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 ) -> Dict: _lowercase : Optional[Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) _lowercase : Any = load_dataset('glue' , 'mrpc' ) def tokenize_function(SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _lowercase : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=A__ , max_length=A__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowercase : List[Any] = datasets.map( A__ , batched=A__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowercase : Dict = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowercase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowercase : Union[str, Any] = 16 elif accelerator.mixed_precision != "no": _lowercase : List[str] = 8 else: _lowercase : str = None return tokenizer.pad( A__ , padding='longest' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='pt' , ) # Instantiate dataloaders. _lowercase : List[str] = DataLoader( tokenized_datasets['train'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) _lowercase : Tuple = DataLoader( tokenized_datasets['validation'] , shuffle=A__ , collate_fn=A__ , batch_size=A__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase = mocked_dataloaders # noqa: F811 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: if os.environ.get('TESTING_MOCKED_DATALOADERS' , A__ ) == "1": _lowercase : Any = 2 # Initialize accelerator _lowercase : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowercase : Union[str, Any] = config['lr'] _lowercase : Any = int(config['num_epochs'] ) _lowercase : List[str] = int(config['seed'] ) _lowercase : Dict = int(config['batch_size'] ) _lowercase : Optional[int] = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation _lowercase : Dict = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _lowercase : int = batch_size // MAX_GPU_BATCH_SIZE _lowercase : Dict = MAX_GPU_BATCH_SIZE set_seed(A__ ) _lowercase , _lowercase : Optional[int] = get_dataloaders(A__ , A__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowercase : List[str] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=A__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowercase : Any = model.to(accelerator.device ) # Instantiate optimizer _lowercase : List[str] = AdamW(params=model.parameters() , lr=A__ ) # Instantiate scheduler _lowercase : Any = get_linear_schedule_with_warmup( optimizer=A__ , num_warmup_steps=100 , num_training_steps=(len(A__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Any = accelerator.prepare( A__ , A__ , A__ , A__ , A__ ) # Now we train the model for epoch in range(A__ ): model.train() for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _lowercase : Any = model(**A__ ) _lowercase : int = outputs.loss _lowercase : Optional[Any] = loss / gradient_accumulation_steps accelerator.backward(A__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() _lowercase : List[str] = 0 for step, batch in enumerate(A__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowercase : Tuple = model(**A__ ) _lowercase : str = outputs.logits.argmax(dim=-1 ) _lowercase , _lowercase : str = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(A__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples _lowercase : Dict = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowercase : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=A__ , references=A__ , ) _lowercase : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , A__ ) def __magic_name__ ( ) -> Tuple: _lowercase : Tuple = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=A__ , default=A__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _lowercase : int = parser.parse_args() _lowercase : List[str] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(A__ , A__ ) if __name__ == "__main__": main()
714
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Any = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Tuple = normalizer_class(**_lowerCAmelCase ) _lowercase : Union[str, Any] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = [self.sep_token_id] _lowercase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
677
0
import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session' ) def __magic_name__ ( ) -> Any: _lowercase : str = 10 _lowercase : Union[str, Any] = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string' ) ), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ), 'answers': datasets.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), 'id': datasets.Value('int64' ), } ) _lowercase : List[str] = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [97], 'text': ['1976']}] * 10, 'id': list(range(lowerCamelCase__ ) ), } , features=lowerCamelCase__ , ) return dataset @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: _lowercase : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' ) dataset.map(cache_file_name=lowerCamelCase__ ) return filename # FILE_CONTENT + files UpperCamelCase = "\\n Text data.\n Second line of data." @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = tmp_path_factory.mktemp('data' ) / "file.txt" _lowercase : List[str] = FILE_CONTENT with open(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ ) return filename @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: import bza _lowercase : Dict = tmp_path_factory.mktemp('data' ) / "file.txt.bz2" _lowercase : Tuple = bytes(lowerCamelCase__ , 'utf-8' ) with bza.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: import gzip _lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' ) _lowercase : List[str] = bytes(lowerCamelCase__ , 'utf-8' ) with gzip.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: if datasets.config.LZ4_AVAILABLE: import lza.frame _lowercase : Dict = tmp_path_factory.mktemp('data' ) / "file.txt.lz4" _lowercase : Optional[Any] = bytes(lowerCamelCase__ , 'utf-8' ) with lza.frame.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: if datasets.config.PY7ZR_AVAILABLE: import pyazr _lowercase : int = tmp_path_factory.mktemp('data' ) / "file.txt.7z" with pyazr.SevenZipFile(lowerCamelCase__ , 'w' ) as archive: archive.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: import tarfile _lowercase : str = tmp_path_factory.mktemp('data' ) / "file.txt.tar" with tarfile.TarFile(lowerCamelCase__ , 'w' ) as f: f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: import lzma _lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / "file.txt.xz" _lowercase : List[str] = bytes(lowerCamelCase__ , 'utf-8' ) with lzma.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: import zipfile _lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / "file.txt.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd _lowercase : Any = tmp_path_factory.mktemp('data' ) / "file.txt.zst" _lowercase : Optional[Any] = bytes(lowerCamelCase__ , 'utf-8' ) with zstd.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: _lowercase : Tuple = tmp_path_factory.mktemp('data' ) / "file.xml" _lowercase : Optional[int] = textwrap.dedent( '\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' ) with open(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ ) return filename UpperCamelCase = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] UpperCamelCase = [ {"col_1": "4", "col_2": 4, "col_3": 4.0}, {"col_1": "5", "col_2": 5, "col_3": 5.0}, ] UpperCamelCase = { "col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0], } UpperCamelCase = [ {"col_3": 0.0, "col_1": "0", "col_2": 0}, {"col_3": 1.0, "col_1": "1", "col_2": 1}, ] UpperCamelCase = [ {"col_1": "s0", "col_2": 0, "col_3": 0.0}, {"col_1": "s1", "col_2": 1, "col_3": 1.0}, {"col_1": "s2", "col_2": 2, "col_3": 2.0}, {"col_1": "s3", "col_2": 3, "col_3": 3.0}, ] @pytest.fixture(scope='session' ) def __magic_name__ ( ) -> Optional[int]: return DATA_DICT_OF_LISTS @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[int] = datasets.Dataset.from_dict(lowerCamelCase__ ) _lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' ) dataset.map(cache_file_name=lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' ) with contextlib.closing(sqlitea.connect(lowerCamelCase__ ) ) as con: _lowercase : Union[str, Any] = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' ) for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' ) with open(lowerCamelCase__ , 'w' , newline='' ) as f: _lowercase : Optional[int] = csv.DictWriter(lowerCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: _lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' ) with open(lowerCamelCase__ , 'w' , newline='' ) as f: _lowercase : Union[str, Any] = csv.DictWriter(lowerCamelCase__ , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: import bza _lowercase : Any = tmp_path_factory.mktemp('data' ) / "dataset.csv.bz2" with open(lowerCamelCase__ , 'rb' ) as f: _lowercase : Optional[Any] = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowerCamelCase__ , 'wb' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Optional[int] = tmp_path_factory.mktemp('data' ) / "dataset.csv.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / "dataset.csv.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Dict = tmp_path_factory.mktemp('data' ) / "dataset_with_dir.csv.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: _lowercase : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' ) _lowercase : str = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), } ) with open(lowerCamelCase__ , 'wb' ) as f: _lowercase : Union[str, Any] = pq.ParquetWriter(lowerCamelCase__ , schema=lowerCamelCase__ ) _lowercase : Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase__ ) )] for k in DATA[0]} , schema=lowerCamelCase__ ) writer.write_table(lowerCamelCase__ ) writer.close() return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) _lowercase : int = {"data": DATA} with open(lowerCamelCase__ , 'w' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) _lowercase : int = {"data": DATA_DICT_OF_LISTS} with open(lowerCamelCase__ , 'w' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' ) with open(lowerCamelCase__ , 'w' ) as f: for item in DATA: f.write(json.dumps(lowerCamelCase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple: _lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' ) with open(lowerCamelCase__ , 'w' ) as f: for item in DATA: f.write(json.dumps(lowerCamelCase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' ) with open(lowerCamelCase__ , 'w' ) as f: for item in DATA_312: f.write(json.dumps(lowerCamelCase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' ) with open(lowerCamelCase__ , 'w' ) as f: for item in DATA_STR: f.write(json.dumps(lowerCamelCase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: import gzip _lowercase : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' ) with open(lowerCamelCase__ , 'rb' ) as orig_file: with gzip.open(lowerCamelCase__ , 'wb' ) as zipped_file: zipped_file.writelines(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: import gzip _lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' ) with open(lowerCamelCase__ , 'rb' ) as orig_file: with gzip.open(lowerCamelCase__ , 'wb' ) as zipped_file: zipped_file.writelines(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: _lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / "dataset.jsonl.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: _lowercase : List[Any] = tmp_path_factory.mktemp('data' ) / "dataset_nested.jsonl.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: _lowercase : Any = tmp_path_factory.mktemp('data' ) / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Dict = tmp_path_factory.mktemp('data' ) / "dataset.jsonl.tar" with tarfile.TarFile(lowerCamelCase__ , 'w' ) as f: f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Dict = tmp_path_factory.mktemp('data' ) / "dataset_nested.jsonl.tar" with tarfile.TarFile(lowerCamelCase__ , 'w' ) as f: f.add(lowerCamelCase__ , arcname=os.path.join('nested' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: _lowercase : List[Any] = ["0", "1", "2", "3"] _lowercase : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' ) with open(lowerCamelCase__ , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Union[str, Any] = ["0", "1", "2", "3"] _lowercase : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' ) with open(lowerCamelCase__ , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : List[str] = ["0", "1", "2", "3"] _lowercase : Optional[Any] = tmp_path_factory.mktemp('data' ) / "dataset.abc" with open(lowerCamelCase__ , 'w' ) as f: for item in data: f.write(item + '\n' ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = tmp_path_factory.mktemp('data' ) / "dataset.text.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: _lowercase : Any = tmp_path_factory.mktemp('data' ) / "dataset_with_dir.text.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) f.write(lowerCamelCase__ , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase__ ) ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Any = tmp_path_factory.mktemp('data' ) / "dataset.ext.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename('unsupported.ext' ) ) f.write(lowerCamelCase__ , arcname=os.path.basename('unsupported_2.ext' ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Union[str, Any] = "\n".join(['First', 'Second\u2029with Unicode new line', 'Third'] ) _lowercase : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' ) with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(lowerCamelCase__ ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( ) -> Optional[int]: return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' ) @pytest.fixture(scope='session' ) def __magic_name__ ( ) -> Any: return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' ) @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: _lowercase : Union[str, Any] = tmp_path_factory.mktemp('data' ) / "dataset.img.zip" with zipfile.ZipFile(lowerCamelCase__ , 'w' ) as f: f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) ) f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ).replace('.jpg' , '2.jpg' ) ) return path @pytest.fixture(scope='session' ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Optional[Any] = tmp_path_factory.mktemp('data_dir' ) (data_dir / "subdir").mkdir() with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden file with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f: f.write('foo\n' * 10 ) with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f: f.write('bar\n' * 10 ) return data_dir
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging UpperCamelCase = { "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: _lowercase : int = XLNetConfig.from_json_file(a__ ) _lowercase : Dict = finetuning_task.lower() if finetuning_task is not None else '' if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) _lowercase : Optional[Any] = finetuning_task _lowercase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task] _lowercase : Union[str, Any] = XLNetForSequenceClassification(a__ ) elif "squad" in finetuning_task: _lowercase : Any = finetuning_task _lowercase : Tuple = XLNetForQuestionAnswering(a__ ) else: _lowercase : Optional[Any] = XLNetLMHeadModel(a__ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(a__ , a__ , a__ ) # Save pytorch-model _lowercase : int = os.path.join(a__ , a__ ) _lowercase : Dict = os.path.join(a__ , a__ ) print(F"""Save PyTorch model to {os.path.abspath(a__ )}""" ) torch.save(model.state_dict() , a__ ) print(F"""Save configuration file to {os.path.abspath(a__ )}""" ) with open(a__ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) UpperCamelCase = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
716
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: for attribute in key.split('.' ): _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: _lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: _lowercase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : List[str] = value elif weight_type == "weight_g": _lowercase : Any = value elif weight_type == "weight_v": _lowercase : Tuple = value elif weight_type == "bias": _lowercase : List[str] = value else: _lowercase : Dict = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Dict = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : int = True else: for key, mapped_key in MAPPING.items(): _lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _lowercase : Union[str, Any] = True if "*" in mapped_key: _lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] _lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: _lowercase : Optional[int] = 'weight_g' elif "weight_v" in name: _lowercase : Optional[Any] = 'weight_v' elif "weight" in name: _lowercase : str = 'weight' elif "bias" in name: _lowercase : Any = 'bias' else: _lowercase : str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = full_name.split('conv_layers.' )[-1] _lowercase : Any = name.split('.' ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: _lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertConfig() if is_finetuned: if dict_path: _lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Dict = target_dict.pad_index _lowercase : Dict = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : List[Any] = len(target_dict.symbols ) _lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) _lowercase : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) _lowercase : str = True if config.feat_extract_norm == 'layer' else False _lowercase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) _lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : int = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
677
0
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "artists_file": "artists.json", "lyrics_file": "lyrics.json", "genres_file": "genres.json", } UpperCamelCase = { "artists_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json", }, "genres_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json", }, "lyrics_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json", }, } UpperCamelCase = { "jukebox": 512, } class lowerCAmelCase_ ( __lowerCAmelCase ): _UpperCamelCase : int = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Tuple = PRETRAINED_LYRIC_TOKENS_SIZES _UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=["v3", "v2", "v2"] , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=5 , _lowerCAmelCase="<|endoftext|>" , **_lowerCAmelCase , ): _lowercase : Tuple = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token super().__init__( unk_token=lowerCAmelCase_ , n_genres=lowerCAmelCase_ , version=lowerCAmelCase_ , max_n_lyric_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) _lowercase : Union[str, Any] = version _lowercase : List[str] = max_n_lyric_tokens _lowercase : Tuple = n_genres with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _lowercase : Union[str, Any] = json.load(lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _lowercase : Dict = json.load(lowerCAmelCase_ ) with open(lowerCAmelCase_ , encoding='utf-8' ) as vocab_handle: _lowercase : Optional[Any] = json.load(lowerCAmelCase_ ) _lowercase : Union[str, Any] = r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 7_9: _lowercase : Optional[int] = oov.replace(r'\-\'' , r'\-+\'' ) _lowercase : Optional[int] = regex.compile(lowerCAmelCase_ ) _lowercase : str = {v: k for k, v in self.artists_encoder.items()} _lowercase : Tuple = {v: k for k, v in self.genres_encoder.items()} _lowercase : Tuple = {v: k for k, v in self.lyrics_encoder.items()} @property def __a ( self ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def __a ( self ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = [self.artists_encoder.get(lowerCAmelCase_ , 0 ) for artist in list_artists] for genres in range(len(lowerCAmelCase_ ) ): _lowercase : Dict = [self.genres_encoder.get(lowerCAmelCase_ , 0 ) for genre in list_genres[genres]] _lowercase : int = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _lowercase : Any = [[self.lyrics_encoder.get(lowerCAmelCase_ , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def __a ( self , _lowerCAmelCase ): return list(lowerCAmelCase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): _lowercase , _lowercase , _lowercase : Tuple = self.prepare_for_tokenization(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Tuple = self._tokenize(lowerCAmelCase_ ) return artist, genre, lyrics def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": _lowercase : Tuple = artists[idx].lower() _lowercase : Tuple = [genres[idx].lower()] else: _lowercase : Union[str, Any] = self._normalize(artists[idx] ) + '.v2' _lowercase : Optional[Any] = [ self._normalize(lowerCAmelCase_ ) + '.v2' for genre in genres[idx].split('_' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _lowercase : int = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' ) _lowercase : Any = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n' _lowercase : Optional[int] = {vocab[index]: index + 1 for index in range(len(lowerCAmelCase_ ) )} _lowercase : Optional[Any] = 0 _lowercase : List[Any] = len(lowerCAmelCase_ ) + 1 _lowercase : Tuple = self.vocab _lowercase : int = {v: k for k, v in self.vocab.items()} _lowercase : Union[str, Any] = '' else: _lowercase : Any = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' ) _lowercase : List[str] = self._run_strip_accents(lowerCAmelCase_ ) _lowercase : Optional[Any] = lyrics.replace('\\' , '\n' ) _lowercase : int = self.out_of_vocab.sub('' , lowerCAmelCase_ ), [], [] return artists, genres, lyrics def __a ( self , _lowerCAmelCase ): _lowercase : Union[str, Any] = unicodedata.normalize('NFD' , lowerCAmelCase_ ) _lowercase : int = [] for char in text: _lowercase : int = unicodedata.category(lowerCAmelCase_ ) if cat == "Mn": continue output.append(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) def __a ( self , _lowerCAmelCase ): _lowercase : Union[str, Any] = ( [chr(lowerCAmelCase_ ) for i in range(ord('a' ) , ord('z' ) + 1 )] + [chr(lowerCAmelCase_ ) for i in range(ord('A' ) , ord('Z' ) + 1 )] + [chr(lowerCAmelCase_ ) for i in range(ord('0' ) , ord('9' ) + 1 )] + ['.'] ) _lowercase : int = frozenset(lowerCAmelCase_ ) _lowercase : List[Any] = re.compile(r'_+' ) _lowercase : Any = ''.join([c if c in accepted else '_' for c in text.lower()] ) _lowercase : Dict = pattern.sub('_' , lowerCAmelCase_ ).strip('_' ) return text def __a ( self , _lowerCAmelCase ): return " ".join(lowerCAmelCase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ): # Convert to TensorType if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _lowercase : int = TensorType(lowerCAmelCase_ ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( 'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' ) import tensorflow as tf _lowercase : Optional[int] = tf.constant _lowercase : int = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' ) import torch _lowercase : List[Any] = torch.tensor _lowercase : List[str] = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' ) import jax.numpy as jnp # noqa: F811 _lowercase : int = jnp.array _lowercase : List[str] = _is_jax else: _lowercase : Tuple = np.asarray _lowercase : Dict = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _lowercase : List[str] = [inputs] if not is_tensor(lowerCAmelCase_ ): _lowercase : Tuple = as_tensor(lowerCAmelCase_ ) except: # noqa E722 raise ValueError( 'Unable to create tensor, you should probably activate truncation and/or padding ' 'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' ) return inputs def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="" , _lowerCAmelCase="pt" ): _lowercase : Tuple = [0, 0, 0] _lowercase : Optional[int] = [artist] * len(self.version ) _lowercase : str = [genres] * len(self.version ) _lowercase , _lowercase , _lowercase : Union[str, Any] = self.tokenize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase , _lowercase , _lowercase : Optional[Any] = self._convert_token_to_id(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Dict = [-INFINITY] * len(full_tokens[-1] ) _lowercase : str = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=lowerCAmelCase_ ) for i in range(len(self.version ) ) ] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): if not os.path.isdir(lowerCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowercase : Optional[Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=lowerCAmelCase_ ) ) _lowercase : Optional[Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=lowerCAmelCase_ ) ) _lowercase : Tuple = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=lowerCAmelCase_ ) ) return (artists_file, genres_file, lyrics_file) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.artists_decoder.get(lowerCAmelCase_ ) _lowercase : List[str] = [self.genres_decoder.get(lowerCAmelCase_ ) for genre in genres_index] _lowercase : List[Any] = [self.lyrics_decoder.get(lowerCAmelCase_ ) for character in lyric_index] return artist, genres, lyrics
717
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ): _lowercase : List[str] = parent _lowercase : Optional[Any] = batch_size _lowercase : str = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_input_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Optional[Any] = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Dict = scope _lowercase : List[Any] = range_bbox def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : List[str] = bbox[i, j, 3] _lowercase : Optional[int] = bbox[i, j, 1] _lowercase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : Dict = bbox[i, j, 2] _lowercase : Dict = bbox[i, j, 0] _lowercase : int = t _lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) _lowercase : Any = None if self.use_input_mask: _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Union[str, Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Any = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase ) _lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = self.num_labels _lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_labels _lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[Any] = config_and_inputs _lowercase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Union[str, Any] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = 10 def __a ( self ): _lowercase : Optional[int] = TFLayoutLMModelTester(self ) _lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __a ( self ): pass def __magic_name__ ( ) -> Optional[int]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the sequence output on [0, :3, :3] _lowercase : Optional[Any] = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head _lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Any = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowercase : List[Any] = outputs.loss _lowercase : Any = (2,) self.assertEqual(loss.shape , _lowerCAmelCase ) # test the shape of the logits _lowercase : str = outputs.logits _lowercase : Dict = (2, 2) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Dict = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) # test the shape of the logits _lowercase : Dict = outputs.logits _lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the shape of the logits _lowercase : Any = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
677
0
import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> tuple: return (data["data"], data["target"]) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> np.ndarray: _lowercase : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Predict target for test data _lowercase : Union[str, Any] = xgb.predict(SCREAMING_SNAKE_CASE ) _lowercase : str = predictions.reshape(len(SCREAMING_SNAKE_CASE ) , 1 ) return predictions def __magic_name__ ( ) -> None: _lowercase : List[str] = fetch_california_housing() _lowercase : Optional[int] = data_handling(SCREAMING_SNAKE_CASE ) _lowercase : Optional[Any] = train_test_split( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , test_size=0.25 , random_state=1 ) _lowercase : Tuple = xgboost(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Error printing print(F"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" ) print(F"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
718
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : List[str] = logging.get_logger() # the current default level is logging.WARNING _lowercase : Union[str, Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = logging.get_verbosity() _lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : Tuple = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase ) _lowercase : Optional[Any] = logging.log_levels[env_level_str] _lowercase : Dict = logging.get_verbosity() self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level _lowercase : Any = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(_lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def __a ( self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : List[str] = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def __magic_name__ ( ) -> List[str]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
677
0
import string import numpy def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: return b if a == 0 else greatest_common_divisor(b % a , SCREAMING_SNAKE_CASE ) class lowerCAmelCase_ : _UpperCamelCase : Optional[int] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) _UpperCamelCase : Tuple = numpy.vectorize(lambda __snake_case : x % 36 ) _UpperCamelCase : List[str] = numpy.vectorize(SCREAMING_SNAKE_CASE__ ) def __init__( self , _lowerCAmelCase ): _lowercase : Dict = self.modulus(snake_case__ ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _lowercase : Tuple = encrypt_key.shape[0] def __a ( self , _lowerCAmelCase ): return self.key_string.index(snake_case__ ) def __a ( self , _lowerCAmelCase ): return self.key_string[round(snake_case__ )] def __a ( self ): _lowercase : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowercase : List[Any] = det % len(self.key_string ) _lowercase : Dict = len(self.key_string ) if greatest_common_divisor(snake_case__ , len(self.key_string ) ) != 1: _lowercase : Optional[Any] = ( F"""determinant modular {req_l} of encryption key({det}) """ F"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(snake_case__ ) def __a ( self , _lowerCAmelCase ): _lowercase : List[str] = [char for char in text.upper() if char in self.key_string] _lowercase : str = chars[-1] while len(snake_case__ ) % self.break_key != 0: chars.append(snake_case__ ) return "".join(snake_case__ ) def __a ( self , _lowerCAmelCase ): _lowercase : Any = self.process_text(text.upper() ) _lowercase : Tuple = "" for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ): _lowercase : Optional[int] = text[i : i + self.break_key] _lowercase : List[str] = [self.replace_letters(snake_case__ ) for char in batch] _lowercase : Optional[int] = numpy.array([vec] ).T _lowercase : List[str] = self.modulus(self.encrypt_key.dot(snake_case__ ) ).T.tolist()[ 0 ] _lowercase : List[str] = "".join( self.replace_digits(snake_case__ ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def __a ( self ): _lowercase : int = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _lowercase : Optional[Any] = det % len(self.key_string ) _lowercase : Dict = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _lowercase : Tuple = i break _lowercase : List[str] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(snake_case__ ) ) def __a ( self , _lowerCAmelCase ): _lowercase : str = self.make_decrypt_key() _lowercase : int = self.process_text(text.upper() ) _lowercase : Union[str, Any] = "" for i in range(0 , len(snake_case__ ) - self.break_key + 1 , self.break_key ): _lowercase : Optional[Any] = text[i : i + self.break_key] _lowercase : Union[str, Any] = [self.replace_letters(snake_case__ ) for char in batch] _lowercase : Optional[int] = numpy.array([vec] ).T _lowercase : Union[str, Any] = self.modulus(decrypt_key.dot(snake_case__ ) ).T.tolist()[0] _lowercase : Optional[int] = "".join( self.replace_digits(snake_case__ ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __magic_name__ ( ) -> None: _lowercase : Optional[int] = int(input('Enter the order of the encryption key: ' ) ) _lowercase : Tuple = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(SCREAMING_SNAKE_CASE ): _lowercase : Optional[Any] = [int(SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = HillCipher(numpy.array(SCREAMING_SNAKE_CASE ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) _lowercase : List[Any] = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": _lowercase : Optional[int] = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(SCREAMING_SNAKE_CASE ) ) elif option == "2": _lowercase : Dict = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
719
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase = "pt" elif is_tf_available(): UpperCamelCase = "tf" else: UpperCamelCase = "jax" class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Dict = PerceiverTokenizer _UpperCamelCase : str = False def __a ( self ): super().setUp() _lowercase : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __a ( self , **_lowerCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _lowercase : Union[str, Any] = [] for i in range(len(_lowerCAmelCase ) ): try: _lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) ) _lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowercase : Any = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowercase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowercase : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: _lowercase : List[Any] = ' ' + output_txt _lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __a ( self ): _lowercase : Dict = self.perceiver_tokenizer _lowercase : Optional[Any] = 'Unicode €.' _lowercase : str = tokenizer(_lowerCAmelCase ) _lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' ) _lowercase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : int = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __a ( self ): _lowercase : List[str] = self.perceiver_tokenizer _lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) if FRAMEWORK != "jax": _lowercase : int = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def __a ( self ): _lowercase : List[Any] = self.perceiver_tokenizer _lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _lowerCAmelCase ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertNotIn('decoder_input_ids' , _lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.perceiver_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : Optional[int] = tokenizer( text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def __a ( self ): # safety check on max_len default value so we are sure the test works _lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowercase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) _lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[str] = tempfile.mkdtemp() _lowercase : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : List[str] = json.load(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(_lowerCAmelCase ) _lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] _lowercase : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[int] = tokenizer_class.from_pretrained( _lowerCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )] _lowercase : Tuple = tokenizer_class.from_pretrained( _lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __a ( self ): _lowercase : str = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
677
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off UpperCamelCase = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377, 1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211, 4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786, 11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791, 17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409, 34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361 ] UpperCamelCase = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627, 3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647, 7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793, 14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675, 22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865, 42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362 ] class lowerCAmelCase_ ( __lowerCamelCase ): _UpperCamelCase : Union[str, Any] = "whisper" _UpperCamelCase : str = ["past_key_values"] _UpperCamelCase : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , _lowerCAmelCase=5_1_8_6_5 , _lowerCAmelCase=8_0 , _lowerCAmelCase=6 , _lowerCAmelCase=4 , _lowerCAmelCase=6 , _lowerCAmelCase=4 , _lowerCAmelCase=1_5_3_6 , _lowerCAmelCase=1_5_3_6 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=5_0_2_5_7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=False , _lowerCAmelCase=1_5_0_0 , _lowerCAmelCase=4_4_8 , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=None , _lowerCAmelCase=[2_2_0, 5_0_2_5_6] , _lowerCAmelCase=False , _lowerCAmelCase=2_5_6 , _lowerCAmelCase=False , _lowerCAmelCase=0.05 , _lowerCAmelCase=1_0 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0 , _lowerCAmelCase=1_0 , _lowerCAmelCase=0 , _lowerCAmelCase=7 , **_lowerCAmelCase , ): _lowercase : Dict = vocab_size _lowercase : str = num_mel_bins _lowercase : Dict = d_model _lowercase : int = encoder_layers _lowercase : Dict = encoder_attention_heads _lowercase : List[Any] = decoder_layers _lowercase : Optional[Any] = decoder_attention_heads _lowercase : Union[str, Any] = decoder_ffn_dim _lowercase : List[Any] = encoder_ffn_dim _lowercase : str = dropout _lowercase : Any = attention_dropout _lowercase : int = activation_dropout _lowercase : List[Any] = activation_function _lowercase : Tuple = init_std _lowercase : str = encoder_layerdrop _lowercase : Optional[int] = decoder_layerdrop _lowercase : List[Any] = use_cache _lowercase : List[Any] = encoder_layers _lowercase : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True _lowercase : Union[str, Any] = max_source_positions _lowercase : Optional[Any] = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. _lowercase : str = classifier_proj_size _lowercase : List[str] = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowercase : str = apply_spec_augment _lowercase : str = mask_time_prob _lowercase : Dict = mask_time_length _lowercase : Tuple = mask_time_min_masks _lowercase : Tuple = mask_feature_prob _lowercase : str = mask_feature_length _lowercase : Optional[int] = mask_feature_min_masks _lowercase : Any = median_filter_width super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class lowerCAmelCase_ ( __lowerCamelCase ): @property def __a ( self ): _lowercase : Optional[Any] = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: _lowercase : Union[str, Any] = {0: 'batch'} else: _lowercase : List[Any] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='inputs' ) return common_inputs def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = 2_2_0_5_0 , _lowerCAmelCase = 5.0 , _lowerCAmelCase = 2_2_0 , ): _lowercase : Dict = OrderedDict() _lowercase : Optional[int] = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , ) _lowercase : int = encoder_inputs['input_features'].shape[2] _lowercase : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length _lowercase : List[Any] = super().generate_dummy_inputs( preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowercase : Tuple = encoder_inputs.pop('input_features' ) _lowercase : Union[str, Any] = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: _lowercase : Optional[Any] = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def __a ( self ): return 1E-3
720
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ConditionalDetrFeatureExtractor"] UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { """EleutherAI/gpt-j-6B""": """https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json""", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCAmelCase_ ( _snake_case ): _UpperCamelCase : List[str] = """gptj""" _UpperCamelCase : Dict = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _lowerCAmelCase=5_0_4_0_0 , _lowerCAmelCase=2_0_4_8 , _lowerCAmelCase=4_0_9_6 , _lowerCAmelCase=2_8 , _lowerCAmelCase=1_6 , _lowerCAmelCase=6_4 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_new" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=5_0_2_5_6 , _lowerCAmelCase=False , **_lowerCAmelCase , ): _lowercase : int = vocab_size _lowercase : Union[str, Any] = n_positions _lowercase : int = n_embd _lowercase : int = n_layer _lowercase : str = n_head _lowercase : Tuple = n_inner _lowercase : Union[str, Any] = rotary_dim _lowercase : List[Any] = activation_function _lowercase : Dict = resid_pdrop _lowercase : Optional[Any] = embd_pdrop _lowercase : int = attn_pdrop _lowercase : Any = layer_norm_epsilon _lowercase : Optional[int] = initializer_range _lowercase : int = use_cache _lowercase : Any = bos_token_id _lowercase : List[Any] = eos_token_id super().__init__( bos_token_id=snake_case_ , eos_token_id=snake_case_ , tie_word_embeddings=snake_case_ , **snake_case_ ) class lowerCAmelCase_ ( _snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None , _lowerCAmelCase = False , ): super().__init__(snake_case_ , task=snake_case_ , patching_specs=snake_case_ , use_past=snake_case_ ) if not getattr(self._config , 'pad_token_id' , snake_case_ ): # TODO: how to do that better? _lowercase : List[Any] = 0 @property def __a ( self ): _lowercase : List[Any] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(snake_case_ , direction='inputs' ) _lowercase : Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"} else: _lowercase : Optional[Any] = {0: "batch", 1: "sequence"} return common_inputs @property def __a ( self ): return self._config.n_layer @property def __a ( self ): return self._config.n_head def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : List[Any] = super(snake_case_ , self ).generate_dummy_inputs( snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ ) # We need to order the input in the way they appears in the forward() _lowercase : Optional[Any] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch _lowercase : List[str] = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowercase : Tuple = seqlen + 2 _lowercase : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowercase : str = [ (torch.zeros(snake_case_ ), torch.zeros(snake_case_ )) for _ in range(self.num_layers ) ] _lowercase : List[Any] = common_inputs["attention_mask"] if self.use_past: _lowercase : int = ordered_inputs["attention_mask"].dtype _lowercase : Optional[Any] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(snake_case_ , snake_case_ , dtype=snake_case_ )] , dim=1 ) return ordered_inputs @property def __a ( self ): return 1_3
721
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Tuple = "ClapFeatureExtractor" _UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowercase : Any = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowercase : Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __a ( self ): _lowercase : Dict = self.tokenizer.model_input_names _lowercase : Any = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
677
0
import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Dict = parent _lowercase : str = batch_size _lowercase : Optional[Any] = seq_length _lowercase : List[str] = is_training _lowercase : List[Any] = use_input_mask _lowercase : Union[str, Any] = use_token_type_ids _lowercase : Optional[Any] = use_labels _lowercase : Tuple = vocab_size _lowercase : Union[str, Any] = hidden_size _lowercase : List[Any] = num_hidden_layers _lowercase : str = num_attention_heads _lowercase : int = intermediate_size _lowercase : Union[str, Any] = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : List[Any] = attention_probs_dropout_prob _lowercase : Union[str, Any] = max_position_embeddings _lowercase : Optional[int] = type_vocab_size _lowercase : Dict = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : str = num_labels _lowercase : int = num_choices _lowercase : Dict = scope def __a ( self ): _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : List[Any] = None if self.use_input_mask: _lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Dict = None _lowercase : List[Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self ): return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = NystromformerModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) _lowercase : Any = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) _lowercase : Any = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = NystromformerForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = NystromformerForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : int = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_labels _lowercase : Any = NystromformerForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.num_labels _lowercase : Optional[Any] = NystromformerForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_choices _lowercase : List[str] = NystromformerForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() _lowercase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowercase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowercase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowercase : Union[str, Any] = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self ): _lowercase : Dict = self.prepare_config_and_inputs() ( _lowercase ) : Union[str, Any] = config_and_inputs _lowercase : Dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( lowercase__ , lowercase__ , unittest.TestCase ): _UpperCamelCase : List[Any] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) _UpperCamelCase : int = ( { "feature-extraction": NystromformerModel, "fill-mask": NystromformerForMaskedLM, "question-answering": NystromformerForQuestionAnswering, "text-classification": NystromformerForSequenceClassification, "token-classification": NystromformerForTokenClassification, "zero-shot": NystromformerForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : List[str] = False def __a ( self ): _lowercase : List[Any] = NystromformerModelTester(self ) _lowercase : Tuple = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __a ( self ): _lowercase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowercase : Dict = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) def __a ( self ): _lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) @slow def __a ( self ): for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = NystromformerModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Dict = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' ) _lowercase : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): _lowercase : Tuple = model(UpperCAmelCase__ )[0] _lowercase : int = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , UpperCAmelCase__ ) _lowercase : Dict = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1E-4 ) ) @slow def __a ( self ): _lowercase : Optional[int] = '''the [MASK] of Belgium is Brussels''' _lowercase : Dict = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' ) _lowercase : Optional[int] = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' ) _lowercase : List[Any] = tokenizer(UpperCAmelCase__ , return_tensors='pt' ) with torch.no_grad(): _lowercase : Optional[int] = model(encoding.input_ids ).logits _lowercase : Tuple = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCAmelCase__ ) , 'capital' )
700
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = num_of_nodes _lowercase : list[list[int]] = [] _lowercase : dict[int, int] = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.m_edges.append([u_node, v_node, weight] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase : Optional[int] = self.find_component(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if component_size[u_node] <= component_size[v_node]: _lowercase : str = v_node component_size[v_node] += component_size[u_node] self.set_component(_lowerCAmelCase ) elif component_size[u_node] >= component_size[v_node]: _lowercase : Any = self.find_component(_lowerCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_lowerCAmelCase ) def __a ( self ): _lowercase : Any = [] _lowercase : Optional[Any] = 0 _lowercase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _lowercase : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase : List[str] = edge _lowercase : Union[str, Any] = self.m_component[u] _lowercase : Union[str, Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : int = edge _lowercase : Optional[int] = self.m_component[u] _lowercase : Optional[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _lowercase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __magic_name__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
677
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowerCAmelCase_ ( __UpperCAmelCase ): _UpperCamelCase : str = "visual_bert" def __init__( self , _lowerCAmelCase=3_0_5_2_2 , _lowerCAmelCase=7_6_8 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=1_2 , _lowerCAmelCase=3_0_7_2 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) _lowercase : Optional[int] = vocab_size _lowercase : int = max_position_embeddings _lowercase : int = hidden_size _lowercase : Optional[Any] = visual_embedding_dim _lowercase : Union[str, Any] = num_hidden_layers _lowercase : int = num_attention_heads _lowercase : List[Any] = intermediate_size _lowercase : Dict = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : Tuple = initializer_range _lowercase : Optional[Any] = type_vocab_size _lowercase : Tuple = layer_norm_eps _lowercase : Tuple = bypass_transformer _lowercase : List[str] = special_visual_initialize
701
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = {} _lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids'] _lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
677
0
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : Optional[Any] = '''ylacombe/bark-small''' _lowercase : int = tempfile.mkdtemp() _lowercase : List[Any] = '''en_speaker_1''' _lowercase : List[str] = '''This is a test string''' _lowercase : Optional[int] = '''speaker_embeddings_path.json''' _lowercase : Any = '''speaker_embeddings''' def __a ( self , **_lowerCAmelCase ): return AutoTokenizer.from_pretrained(self.checkpoint , **__lowerCamelCase ) def __a ( self ): shutil.rmtree(self.tmpdirname ) def __a ( self ): _lowercase : str = self.get_tokenizer() _lowercase : Dict = BarkProcessor(tokenizer=__lowerCamelCase ) processor.save_pretrained(self.tmpdirname ) _lowercase : str = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __a ( self ): _lowercase : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _lowercase : Any = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowercase : int = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __a ( self ): _lowercase : int = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _lowercase : Union[str, Any] = 3_5 _lowercase : List[str] = 2 _lowercase : str = 8 _lowercase : Dict = { '''semantic_prompt''': np.ones(__lowerCamelCase ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _lowercase : Dict = processor(text=self.input_string , voice_preset=__lowerCamelCase ) _lowercase : Optional[Any] = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _lowercase : Any = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(__lowerCamelCase , **__lowerCamelCase ) _lowercase : str = processor(text=self.input_string , voice_preset=__lowerCamelCase ) _lowercase : Tuple = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__lowerCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _lowercase : Optional[int] = processor(text=self.input_string , voice_preset=self.voice_preset ) def __a ( self ): _lowercase : int = self.get_tokenizer() _lowercase : Optional[Any] = BarkProcessor(tokenizer=__lowerCamelCase ) _lowercase : List[str] = processor(text=self.input_string ) _lowercase : Optional[int] = tokenizer( self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
702
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration} UpperCamelCase = {"facebook/bart-base": BartTokenizer} def __magic_name__ ( ) -> str: _lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' ) _lowercase : Optional[Any] = parser.parse_args() return args def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]: _lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: _lowercase : Dict = 0 _lowercase : Optional[int] = None _lowercase : Union[str, Any] = 0 return huggingface_model, tokenizer def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: model.eval() _lowercase : List[Any] = None _lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): _lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.' _lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device ) _lowercase : str = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=SCREAMING_SNAKE_CASE , ) logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) ) logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = ort_sess.run( SCREAMING_SNAKE_CASE , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(SCREAMING_SNAKE_CASE ), 'max_length': np.array(SCREAMING_SNAKE_CASE ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def __magic_name__ ( ) -> Any: _lowercase : Dict = parse_args() _lowercase : Union[str, Any] = 5 _lowercase : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _lowercase : Optional[Any] = torch.device(args.device ) _lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(SCREAMING_SNAKE_CASE ) if args.max_length: _lowercase : Any = args.max_length if args.num_beams: _lowercase : List[str] = args.num_beams if args.output_file_path: _lowercase : Union[str, Any] = args.output_file_path else: _lowercase : Tuple = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( UpperCAmelCase_ , unittest.TestCase ): _UpperCamelCase : List[str] = BioGptTokenizer _UpperCamelCase : List[Any] = False def __a ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowercase : Optional[int] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] _lowercase : Optional[int] = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) _lowercase : Dict = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] _lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(_lowercase ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(_lowercase ) ) def __a ( self , _lowerCAmelCase ): _lowercase : str = 'lower newer' _lowercase : Any = 'lower newer' return input_text, output_text def __a ( self ): _lowercase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) _lowercase : List[str] = 'lower' _lowercase : Tuple = ['low', 'er</w>'] _lowercase : Optional[int] = tokenizer.tokenize(_lowercase ) self.assertListEqual(_lowercase , _lowercase ) _lowercase : Union[str, Any] = tokens + ['<unk>'] _lowercase : Optional[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase ) @slow def __a ( self ): _lowercase : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) _lowercase : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=_lowercase ) _lowercase : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_lowercase ) _lowercase : Any = tokenizer.build_inputs_with_special_tokens(_lowercase ) _lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
703
from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _UpperCamelCase : List[Any] = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : int = False _UpperCamelCase : Optional[int] = False def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ): _lowercase : int = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): _lowercase : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Optional[Any] = parent _lowercase : str = batch_size _lowercase : Optional[int] = seq_length _lowercase : Tuple = is_training _lowercase : List[Any] = use_input_mask _lowercase : Optional[Any] = use_token_type_ids _lowercase : Any = use_labels _lowercase : str = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[int] = intermediate_size _lowercase : Tuple = hidden_act _lowercase : Dict = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Tuple = max_position_embeddings _lowercase : List[str] = type_vocab_size _lowercase : Optional[Any] = type_sequence_label_size _lowercase : List[Any] = initializer_range _lowercase : List[str] = num_labels _lowercase : Union[str, Any] = num_choices _lowercase : List[str] = scope _lowercase : Union[str, Any] = embedding_size def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : Optional[int] = None if self.use_input_mask: _lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : int = None if self.use_token_type_ids: _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Dict = None _lowercase : Any = None _lowercase : int = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Dict = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[Any] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFMobileBertModel(config=_lowerCAmelCase ) _lowercase : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) _lowercase : Tuple = [input_ids, input_mask] _lowercase : str = model(_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = TFMobileBertForMaskedLM(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Optional[int] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFMobileBertForPreTraining(config=_lowerCAmelCase ) _lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = self.num_labels _lowercase : Tuple = TFMobileBertForSequenceClassification(config=_lowerCAmelCase ) _lowercase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = self.num_choices _lowercase : List[str] = TFMobileBertForMultipleChoice(config=_lowerCAmelCase ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Optional[int] = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : Tuple = tf.tile(tf.expand_dims(_lowerCAmelCase , 1 ) , (1, self.num_choices, 1) ) _lowercase : str = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } _lowercase : Union[str, Any] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = self.num_labels _lowercase : int = TFMobileBertForTokenClassification(config=_lowerCAmelCase ) _lowercase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : List[str] = model(_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Tuple = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : Any = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} _lowercase : int = model(_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : List[str] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : int = config_and_inputs _lowercase : Tuple = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict def __a ( self ): _lowercase : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self ) _lowercase : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase ) def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase ) @slow def __a ( self ): # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: _lowercase : List[str] = TFMobileBertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Dict = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' ) _lowercase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _lowercase : List[str] = model(_lowerCAmelCase )[0] _lowercase : str = [1, 6, 3_0_5_2_2] self.assertEqual(output.shape , _lowerCAmelCase ) _lowercase : List[Any] = tf.constant( [ [ [-4.5_91_95_47, -9.24_82_95, -9.64_52_56], [-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37], [-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4 )
677
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCAmelCase_ ( snake_case__ ): _UpperCamelCase : Any = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 2_5_5 , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , **_lowerCAmelCase , ): super().__init__(**lowercase_ ) _lowercase : List[str] = size if size is not None else {"shortest_edge": 2_2_4} _lowercase : Optional[int] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) _lowercase : List[Any] = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} _lowercase : Any = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='crop_size' ) _lowercase : str = do_resize _lowercase : Dict = size _lowercase : List[str] = resample _lowercase : Optional[Any] = do_center_crop _lowercase : Union[str, Any] = crop_size _lowercase : List[str] = do_rescale _lowercase : str = rescale_factor _lowercase : Optional[Any] = do_normalize _lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowercase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD _lowercase : Optional[int] = do_convert_rgb def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ): _lowercase : Union[str, Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) _lowercase : Tuple = get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ): _lowercase : Union[str, Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" ) return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ): return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ): return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ): _lowercase : Optional[int] = do_resize if do_resize is not None else self.do_resize _lowercase : Optional[int] = size if size is not None else self.size _lowercase : List[str] = get_size_dict(lowercase_ , param_name='size' , default_to_square=lowercase_ ) _lowercase : Tuple = resample if resample is not None else self.resample _lowercase : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size _lowercase : Optional[int] = get_size_dict(lowercase_ , param_name='crop_size' , default_to_square=lowercase_ ) _lowercase : int = do_rescale if do_rescale is not None else self.do_rescale _lowercase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase : Tuple = do_normalize if do_normalize is not None else self.do_normalize _lowercase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean _lowercase : List[str] = image_std if image_std is not None else self.image_std _lowercase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowercase : Union[str, Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowercase : Any = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. _lowercase : List[str] = [to_numpy_array(lowercase_ ) for image in images] if do_resize: _lowercase : List[str] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: _lowercase : Tuple = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: _lowercase : List[Any] = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: _lowercase : Optional[Any] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] _lowercase : Tuple = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] _lowercase : int = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
704
import qiskit def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> qiskit.result.counts.Counts: _lowercase : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _lowercase : Optional[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _lowercase : Optional[Any] = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = single_qubit_measure(2, 2) print(f'''Total count for various states are: {counts}''')
677
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Dict: if attention_mask is None: _lowercase : str = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: _lowercase : List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: _lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowercase : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowercase : str = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=9_9 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=4 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=1 , _lowerCAmelCase=0 , _lowerCAmelCase=0.02 , ): _lowercase : List[str] = parent _lowercase : List[Any] = batch_size _lowercase : Optional[Any] = seq_length _lowercase : Optional[Any] = is_training _lowercase : Tuple = use_labels _lowercase : Dict = vocab_size _lowercase : Any = hidden_size _lowercase : Optional[Any] = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = intermediate_size _lowercase : Any = hidden_act _lowercase : Optional[Any] = hidden_dropout_prob _lowercase : Tuple = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : str = eos_token_id _lowercase : int = pad_token_id _lowercase : Tuple = bos_token_id _lowercase : List[Any] = initializer_range def __a ( self ): _lowercase : str = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) _lowercase : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) _lowercase : List[str] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Tuple = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_lowerCAmelCase , ) _lowercase : List[Any] = prepare_blenderbot_inputs_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return config, inputs_dict def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = 2_0 _lowercase : List[Any] = model_class_name(_lowerCAmelCase ) _lowercase : List[Any] = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : int = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : int = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowerCAmelCase , ) _lowercase : List[Any] = model.decode(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = 2_0 _lowercase : Any = model_class_name(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] ) _lowercase , _lowercase : Optional[int] = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) _lowercase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) _lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : int = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) _lowercase : List[Any] = model.decode( decoder_input_ids[:, :-1] , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) _lowercase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowerCAmelCase , decoder_position_ids=_lowerCAmelCase , ) _lowercase : Dict = model.decode(_lowerCAmelCase , _lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase ) _lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Tuple = 99 def __a ( self ): _lowercase : Dict = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) _lowercase : Union[str, Any] = input_ids.shape[0] _lowercase : Optional[int] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __a ( self ): _lowercase , _lowercase , _lowercase : int = self._get_config_and_data() _lowercase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Union[str, Any] = lm_model(input_ids=_lowerCAmelCase ) _lowercase : str = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) _lowercase : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(_lowerCAmelCase ) _lowercase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) _lowercase : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) _lowercase : Dict = lm_model(input_ids=_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase ) _lowercase : Tuple = (*summary.shape, config.vocab_size) self.assertEqual(outputs['logits'].shape , _lowerCAmelCase ) def __a ( self ): _lowercase : Dict = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) _lowercase : Union[str, Any] = shift_tokens_right(_lowerCAmelCase , 1 , 2 ) _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() _lowercase : Dict = np.equal(_lowerCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_lowerCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class lowerCAmelCase_ ( __snake_case , unittest.TestCase , __snake_case ): _UpperCamelCase : int = True _UpperCamelCase : Any = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) _UpperCamelCase : Any = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __a ( self ): _lowercase : List[str] = FlaxBlenderbotSmallModelTester(self ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : Any = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = model_class(_lowerCAmelCase ) @jax.jit def encode_jitted(_lowerCAmelCase , _lowerCAmelCase=None , **_lowerCAmelCase ): return model.encode(input_ids=_lowerCAmelCase , attention_mask=_lowerCAmelCase ) with self.subTest('JIT Enabled' ): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Dict = encode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __a ( self ): _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _lowercase : int = model_class(_lowerCAmelCase ) _lowercase : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) _lowercase : List[Any] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): return model.decode( decoder_input_ids=_lowerCAmelCase , decoder_attention_mask=_lowerCAmelCase , encoder_outputs=_lowerCAmelCase , ) with self.subTest('JIT Enabled' ): _lowercase : Dict = decode_jitted(**_lowerCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _lowercase : Any = decode_jitted(**_lowerCAmelCase ).to_tuple() self.assertEqual(len(_lowerCAmelCase ) , len(_lowerCAmelCase ) ) for jitted_output, output in zip(_lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __a ( self ): for model_class_name in self.all_model_classes: _lowercase : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids _lowercase : Any = np.ones((1, 1) ) * model.config.eos_token_id _lowercase : int = model(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase )
677
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( ImageTextPipelineOutput, UniDiffuserPipeline, ) else: from .modeling_text_decoder import UniDiffuserTextDecoder from .modeling_uvit import UniDiffuserModel, UTransformeraDModel from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
706
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Dict = "longformer" def __init__( self , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0 , _lowerCAmelCase = 2 , _lowerCAmelCase = 3_0_5_2_2 , _lowerCAmelCase = 7_6_8 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 1_2 , _lowerCAmelCase = 3_0_7_2 , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 1E-12 , _lowerCAmelCase = False , **_lowerCAmelCase , ): super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase ) _lowercase : Optional[int] = attention_window _lowercase : str = sep_token_id _lowercase : Optional[Any] = bos_token_id _lowercase : List[Any] = eos_token_id _lowercase : Optional[Any] = vocab_size _lowercase : List[Any] = hidden_size _lowercase : Union[str, Any] = num_hidden_layers _lowercase : Optional[int] = num_attention_heads _lowercase : List[str] = hidden_act _lowercase : List[str] = intermediate_size _lowercase : List[Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : Any = max_position_embeddings _lowercase : int = type_vocab_size _lowercase : Optional[int] = initializer_range _lowercase : List[Any] = layer_norm_eps _lowercase : List[str] = onnx_export class lowerCAmelCase_ ( __snake_case ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase = "default" , _lowerCAmelCase = None ): super().__init__(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _lowercase : str = True @property def __a ( self ): if self.task == "multiple-choice": _lowercase : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : int = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def __a ( self ): _lowercase : Optional[int] = super().outputs if self.task == "default": _lowercase : List[str] = {0: 'batch'} return outputs @property def __a ( self ): return 1E-4 @property def __a ( self ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 1_4 ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ): _lowercase : int = super().generate_dummy_inputs( preprocessor=_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _lowercase : str = torch.zeros_like(inputs['input_ids'] ) # make every second token global _lowercase : Any = 1 return inputs
677
0
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : Tuple = tmp_path / 'cache' _lowercase : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowercase : List[Any] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: _lowercase : int = tmp_path / 'cache' _lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _lowercase : List[Any] = features.copy() if features else default_expected_features _lowercase : int = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Union[str, Any] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : str = tmp_path / 'cache' _lowercase : int = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} _lowercase : List[str] = features.copy() if features else default_expected_features _lowercase : Optional[int] = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Optional[int] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[int] = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} _lowercase : List[Any] = features.copy() _lowercase : str = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Optional[Any] = tmp_path / 'cache' _lowercase : int = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[Any] = tmp_path / 'cache' _lowercase : List[str] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _lowercase : int = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowercase : Union[str, Any] = jsonl_path elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _lowercase : Dict = [jsonl_path] _lowercase : Any = tmp_path / 'cache' _lowercase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _lowercase : Union[str, Any] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=("train",) ) -> Any: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for split in splits: _lowercase : Tuple = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: _lowercase : int = tmp_path / 'cache' _lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowercase : List[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: _lowercase : Optional[int] = tmp_path / 'cache' _lowercase : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _lowercase : Optional[Any] = features.copy() if features else default_expected_features _lowercase : Any = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : str = JsonDatasetReader({'train': jsonl_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: if split: _lowercase : Optional[Any] = {split: jsonl_path} else: _lowercase : int = 'train' _lowercase : Any = {'train': jsonl_path, 'test': jsonl_path} _lowercase : Dict = tmp_path / 'cache' _lowercase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} _lowercase : int = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[str]: return json.load(_SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: return [json.loads(_SCREAMING_SNAKE_CASE ) for line in buffer] class lowerCAmelCase_ : @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase ).write() buffer.seek(0 ) _lowercase : Optional[int] = load_json_function(__UpperCamelCase ) assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert isinstance(exported_content[0] , __UpperCamelCase ) assert len(__UpperCamelCase ) == 1_0 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase ).write() buffer.seek(0 ) _lowercase : List[Any] = load_json(__UpperCamelCase ) assert isinstance(__UpperCamelCase , __UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__UpperCamelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(__UpperCamelCase ) == 1_0 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) _lowercase : Any = load_json_function(__UpperCamelCase ) assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert isinstance(exported_content[0] , __UpperCamelCase ) assert len(__UpperCamelCase ) == 1_0 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) _lowercase : Optional[Any] = load_json(__UpperCamelCase ) assert isinstance(__UpperCamelCase , __UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__UpperCamelCase , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 1_0 else: assert len(__UpperCamelCase ) == 1_0 def __a ( self , _lowerCAmelCase ): with pytest.raises(__UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}""" _lowercase : Any = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , compression=__UpperCamelCase ).write() with fsspec.open(__UpperCamelCase , 'rb' , compression='infer' ) as f: _lowercase : int = f.read() with fsspec.open(__UpperCamelCase , 'rb' , compression='infer' ) as f: _lowercase : Union[str, Any] = f.read() assert exported_content == original_content
707
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> bool: return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
677
0
from ..utils import DummyObject, requires_backends class lowerCAmelCase_ ( metaclass=A_ ): _UpperCamelCase : Optional[int] = ['''transformers''', '''torch''', '''note_seq'''] def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): requires_backends(self , ['transformers', 'torch', 'note_seq'] ) @classmethod def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ): requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) @classmethod def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ): requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
708
import math def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0 , SCREAMING_SNAKE_CASE = 0 ) -> list: _lowercase : List[str] = end or len(SCREAMING_SNAKE_CASE ) for i in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): _lowercase : Dict = i _lowercase : str = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: _lowercase : Optional[Any] = array[temp_index - 1] temp_index -= 1 _lowercase : Optional[Any] = temp_index_value return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: # Max Heap _lowercase : List[str] = index _lowercase : List[str] = 2 * index + 1 # Left Node _lowercase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: _lowercase : Any = left_index if right_index < heap_size and array[largest] < array[right_index]: _lowercase : str = right_index if largest != index: _lowercase , _lowercase : List[str] = array[largest], array[index] heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: _lowercase : Optional[Any] = len(SCREAMING_SNAKE_CASE ) for i in range(n // 2 , -1 , -1 ): heapify(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in range(n - 1 , 0 , -1 ): _lowercase , _lowercase : List[Any] = array[0], array[i] heapify(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE ) return array def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: _lowercase : Optional[Any] = low _lowercase : Tuple = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i _lowercase , _lowercase : Tuple = array[j], array[i] i += 1 def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list: if len(SCREAMING_SNAKE_CASE ) == 0: return array _lowercase : List[str] = 2 * math.ceil(math.loga(len(SCREAMING_SNAKE_CASE ) ) ) _lowercase : str = 16 return intro_sort(SCREAMING_SNAKE_CASE , 0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: while end - start > size_threshold: if max_depth == 0: return heap_sort(SCREAMING_SNAKE_CASE ) max_depth -= 1 _lowercase : int = median_of_a(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , start + ((end - start) // 2) + 1 , end - 1 ) _lowercase : str = partition(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) intro_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = p return insertion_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = input("Enter numbers separated by a comma : ").strip() UpperCamelCase = [float(item) for item in user_input.split(",")] print(sort(unsorted))
677
0
from __future__ import annotations UpperCamelCase = "#" class lowerCAmelCase_ : def __init__( self ): _lowercase : dict = {} def __a ( self , _lowerCAmelCase ): _lowercase : Any = self._trie for char in text: if char not in trie: _lowercase : Tuple = {} _lowercase : str = trie[char] _lowercase : List[str] = True def __a ( self , _lowerCAmelCase ): _lowercase : Dict = self._trie for char in prefix: if char in trie: _lowercase : Optional[int] = trie[char] else: return [] return self._elements(UpperCamelCase__ ) def __a ( self , _lowerCAmelCase ): _lowercase : int = [] for c, v in d.items(): _lowercase : Tuple = [' '] if c == END else [(c + s) for s in self._elements(UpperCamelCase__ )] result.extend(UpperCamelCase__ ) return tuple(UpperCamelCase__ ) UpperCamelCase = Trie() UpperCamelCase = ("depart", "detergent", "daring", "dog", "deer", "deal") for word in words: trie.insert_word(word) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> int: _lowercase : Tuple = trie.find_word(lowerCAmelCase__ ) return tuple(string + word for word in suffixes ) def __magic_name__ ( ) -> int: print(autocomplete_using_trie('de' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
709
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCamelCase = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["CLIPFeatureExtractor"] UpperCamelCase = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : str = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: _lowercase : Optional[Any] = 128 elif "12-12" in model_name: _lowercase : Optional[int] = 12 _lowercase : str = 12 elif "14-14" in model_name: _lowercase : List[str] = 14 _lowercase : Union[str, Any] = 14 elif "16-16" in model_name: _lowercase : List[str] = 16 _lowercase : Union[str, Any] = 16 else: raise ValueError('Model not supported' ) _lowercase : int = 'huggingface/label-files' if "speech-commands" in model_name: _lowercase : List[str] = 35 _lowercase : Union[str, Any] = 'speech-commands-v2-id2label.json' else: _lowercase : Optional[Any] = 527 _lowercase : Tuple = 'audioset-id2label.json' _lowercase : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) _lowercase : Dict = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} _lowercase : Tuple = idalabel _lowercase : List[Any] = {v: k for k, v in idalabel.items()} return config def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> str: if "module.v" in name: _lowercase : Optional[Any] = name.replace('module.v' , 'audio_spectrogram_transformer' ) if "cls_token" in name: _lowercase : int = name.replace('cls_token' , 'embeddings.cls_token' ) if "dist_token" in name: _lowercase : Any = name.replace('dist_token' , 'embeddings.distillation_token' ) if "pos_embed" in name: _lowercase : List[Any] = name.replace('pos_embed' , 'embeddings.position_embeddings' ) if "patch_embed.proj" in name: _lowercase : Optional[Any] = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) # transformer blocks if "blocks" in name: _lowercase : List[Any] = name.replace('blocks' , 'encoder.layer' ) if "attn.proj" in name: _lowercase : Tuple = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: _lowercase : List[Any] = name.replace('attn' , 'attention.self' ) if "norm1" in name: _lowercase : Union[str, Any] = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: _lowercase : Union[str, Any] = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: _lowercase : Optional[Any] = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: _lowercase : int = name.replace('mlp.fc2' , 'output.dense' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: _lowercase : Optional[int] = name.replace('audio_spectrogram_transformer.norm' , 'audio_spectrogram_transformer.layernorm' ) # classifier head if "module.mlp_head.0" in name: _lowercase : int = name.replace('module.mlp_head.0' , 'classifier.layernorm' ) if "module.mlp_head.1" in name: _lowercase : Optional[int] = name.replace('module.mlp_head.1' , 'classifier.dense' ) return name def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: for key in orig_state_dict.copy().keys(): _lowercase : str = orig_state_dict.pop(SCREAMING_SNAKE_CASE ) if "qkv" in key: _lowercase : str = key.split('.' ) _lowercase : Optional[Any] = int(key_split[3] ) _lowercase : Optional[Any] = config.hidden_size if "weight" in key: _lowercase : Dict = val[:dim, :] _lowercase : List[Any] = val[dim : dim * 2, :] _lowercase : Dict = val[-dim:, :] else: _lowercase : List[str] = val[:dim] _lowercase : Optional[int] = val[dim : dim * 2] _lowercase : List[str] = val[-dim:] else: _lowercase : Dict = val return orig_state_dict def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: _lowercase : str = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: _lowercase : List[Any] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE ) _lowercase : Tuple = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict _lowercase : int = model_name_to_url[model_name] _lowercase : List[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' ) # remove some keys remove_keys(SCREAMING_SNAKE_CASE ) # rename some keys _lowercase : Tuple = convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # load 🤗 model _lowercase : Tuple = ASTForAudioClassification(SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 _lowercase : List[str] = -4.267_7393 if 'speech-commands' not in model_name else -6.84_5978 _lowercase : Dict = 4.568_9974 if 'speech-commands' not in model_name else 5.565_4526 _lowercase : Union[str, Any] = 1_024 if 'speech-commands' not in model_name else 128 _lowercase : Any = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) if "speech-commands" in model_name: _lowercase : int = load_dataset('speech_commands' , 'v0.02' , split='validation' ) _lowercase : Optional[int] = dataset[0]['audio']['array'] else: _lowercase : List[str] = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' , ) _lowercase , _lowercase : str = torchaudio.load(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = waveform.squeeze().numpy() _lowercase : Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE , sampling_rate=16_000 , return_tensors='pt' ) # forward pass _lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE ) _lowercase : Dict = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": _lowercase : Any = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": _lowercase : int = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": _lowercase : Tuple = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": _lowercase : int = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": _lowercase : Union[str, Any] = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": _lowercase : List[str] = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": _lowercase : List[str] = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": _lowercase : Union[str, Any] = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('Unknown model name' ) if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ): raise ValueError('Logits don\'t match' ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE ) if push_to_hub: print('Pushing model and feature extractor to the hub...' ) model.push_to_hub(F"""MIT/{model_name}""" ) feature_extractor.push_to_hub(F"""MIT/{model_name}""" ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="ast-finetuned-audioset-10-10-0.4593", type=str, help="Name of the Audio Spectrogram Transformer model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) UpperCamelCase = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
710
from collections.abc import Sequence def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: return sum(c * (x**i) for i, c in enumerate(SCREAMING_SNAKE_CASE ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: _lowercase : Optional[Any] = 0.0 for coeff in reversed(SCREAMING_SNAKE_CASE ): _lowercase : Optional[int] = result * x + coeff return result if __name__ == "__main__": UpperCamelCase = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCamelCase = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
677
0
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Tuple = inspect.getfile(accelerate.test_utils ) _UpperCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) _UpperCamelCase : List[str] = ['''accelerate''', '''launch'''] _UpperCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate''' _UpperCamelCase : Optional[Any] = '''default_config.yaml''' _UpperCamelCase : int = config_folder / config_file _UpperCamelCase : Any = config_folder / '''_default_config.yaml''' _UpperCamelCase : Optional[int] = Path("tests/test_configs" ) @classmethod def __a ( cls ): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def __a ( cls ): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def __a ( self ): _lowercase : Union[str, Any] = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def __a ( self ): for config in sorted(self.test_config_path.glob('**/*.yaml' ) ): with self.subTest(config_file=UpperCamelCase__ ): execute_subprocess_async( self.base_cmd + ['--config_file', str(UpperCamelCase__ ), self.test_file_path] , env=os.environ.copy() ) def __a ( self ): execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() ) class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : Dict = '''test-tpu''' _UpperCamelCase : Tuple = '''us-central1-a''' _UpperCamelCase : Optional[Any] = '''ls''' _UpperCamelCase : List[str] = ['''accelerate''', '''tpu-config'''] _UpperCamelCase : Dict = '''cd /usr/share''' _UpperCamelCase : int = '''tests/test_samples/test_command_file.sh''' _UpperCamelCase : List[Any] = '''Running gcloud compute tpus tpu-vm ssh''' def __a ( self ): _lowercase : int = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Dict = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Union[str, Any] = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=UpperCamelCase__ ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Tuple = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : int = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo \"Hello World\"', '--debug', ] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Tuple = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : int = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Optional[int] = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCamelCase__ , ) def __a ( self ): _lowercase : Any = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=UpperCamelCase__ , ) self.assertIn( F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCamelCase__ , )
711
from __future__ import annotations class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase=None ): _lowercase : int = data _lowercase : Union[str, Any] = None def __repr__( self ): _lowercase : Dict = [] _lowercase : Tuple = self while temp: string_rep.append(F"""{temp.data}""" ) _lowercase : Optional[Any] = temp.next return "->".join(_lowerCAmelCase ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any: if not elements_list: raise Exception('The Elements List is empty' ) _lowercase : Union[str, Any] = Node(elements_list[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): _lowercase : Optional[int] = Node(elements_list[i] ) _lowercase : List[Any] = current.next return head def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> None: if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): print_reverse(head_node.next ) print(head_node.data ) def __magic_name__ ( ) -> List[str]: from doctest import testmod testmod() _lowercase : int = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(SCREAMING_SNAKE_CASE ) print('Elements in Reverse:' ) print_reverse(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = jnp.ones((batch_size, length) ) / length return scores def __a ( self ): _lowercase : List[str] = None _lowercase : Tuple = 2_0 _lowercase : Tuple = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase_ ) # tweak scores to not be uniform anymore _lowercase : Optional[Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch _lowercase : Optional[int] = scores.at[1, 1_0].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax _lowercase : str = jax.nn.softmax(lowerCAmelCase_ , axis=-1 ) _lowercase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 ) _lowercase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=1.3 ) _lowercase : List[str] = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase_ , scores.copy() , cur_len=lowerCAmelCase_ ) , axis=-1 ) _lowercase : int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase_ , scores.copy() , cur_len=lowerCAmelCase_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def __a ( self ): _lowercase : Tuple = None _lowercase : Tuple = 1_0 _lowercase : str = 2 # create ramp distribution _lowercase : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy() _lowercase : Union[str, Any] = ramp_logits[1:, : vocab_size // 2] + vocab_size _lowercase : Dict = FlaxTopKLogitsWarper(3 ) _lowercase : Optional[Any] = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case _lowercase : int = 5 _lowercase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) _lowercase : Any = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, length) ).copy() _lowercase : Dict = top_k_warp_safety_check(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def __a ( self ): _lowercase : List[str] = None _lowercase : Any = 1_0 _lowercase : List[str] = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) _lowercase : int = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) _lowercase : Optional[int] = FlaxTopPLogitsWarper(0.8 ) _lowercase : List[Any] = np.exp(top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 _lowercase : Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # check edge cases with negative and extreme logits _lowercase : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme _lowercase : Optional[int] = ramp_logits[1] * 1_00.0 # make sure at least 2 tokens are kept _lowercase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) _lowercase : Dict = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def __a ( self ): _lowercase : Dict = 2_0 _lowercase : List[str] = 4 _lowercase : List[Any] = 0 _lowercase : Optional[int] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase_ ) # check that min length is applied at length 5 _lowercase : str = ids_tensor((batch_size, 2_0) , vocab_size=2_0 ) _lowercase : Tuple = 5 _lowercase : int = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : str = min_dist_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('inf' )] ) # check that min length is not applied anymore at length 15 _lowercase : int = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Union[str, Any] = 1_5 _lowercase : Tuple = min_dist_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def __a ( self ): _lowercase : int = 2_0 _lowercase : int = 4 _lowercase : Any = 0 _lowercase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) # check that all scores are -inf except the bos_token_id score _lowercase : Dict = ids_tensor((batch_size, 1) , vocab_size=2_0 ) _lowercase : Dict = 1 _lowercase : str = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Optional[int] = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 _lowercase : Tuple = 3 _lowercase : str = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Any = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def __a ( self ): _lowercase : Optional[int] = 2_0 _lowercase : Tuple = 4 _lowercase : Tuple = 0 _lowercase : Optional[int] = 5 _lowercase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) # check that all scores are -inf except the eos_token_id when max_length is reached _lowercase : Optional[int] = ids_tensor((batch_size, 4) , vocab_size=2_0 ) _lowercase : Optional[int] = 4 _lowercase : Optional[int] = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Dict = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached _lowercase : List[str] = 3 _lowercase : Any = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Any = logits_processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) self.assertFalse(jnp.isinf(lowerCAmelCase_ ).any() ) def __a ( self ): _lowercase : Tuple = 4 _lowercase : Dict = 1_0 _lowercase : Any = 1_5 _lowercase : Tuple = 2 _lowercase : str = 1 _lowercase : Optional[Any] = 1_5 # dummy input_ids and scores _lowercase : Optional[int] = ids_tensor((batch_size, sequence_length) , lowerCAmelCase_ ) _lowercase : Union[str, Any] = input_ids.copy() _lowercase : str = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : Union[str, Any] = scores.copy() # instantiate all dist processors _lowercase : Any = FlaxTemperatureLogitsWarper(temperature=0.5 ) _lowercase : int = FlaxTopKLogitsWarper(3 ) _lowercase : Dict = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _lowercase : Tuple = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase_ ) _lowercase : Tuple = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) _lowercase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) _lowercase : Dict = 1_0 # no processor list _lowercase : Optional[int] = temp_dist_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Optional[Any] = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Union[str, Any] = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : str = min_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Union[str, Any] = bos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : List[str] = eos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # with processor list _lowercase : Any = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _lowercase : List[str] = processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def __a ( self ): _lowercase : Union[str, Any] = 4 _lowercase : Optional[int] = 1_0 _lowercase : int = 1_5 _lowercase : Any = 2 _lowercase : List[Any] = 1 _lowercase : List[Any] = 1_5 # dummy input_ids and scores _lowercase : Union[str, Any] = ids_tensor((batch_size, sequence_length) , lowerCAmelCase_ ) _lowercase : Optional[int] = input_ids.copy() _lowercase : Tuple = self._get_uniform_logits(lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : List[Any] = scores.copy() # instantiate all dist processors _lowercase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 ) _lowercase : int = FlaxTopKLogitsWarper(3 ) _lowercase : str = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors _lowercase : Optional[Any] = FlaxMinLengthLogitsProcessor(min_length=1_0 , eos_token_id=lowerCAmelCase_ ) _lowercase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase_ ) _lowercase : str = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) _lowercase : int = 1_0 # no processor list def run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Dict = temp_dist_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Union[str, Any] = top_k_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Union[str, Any] = top_p_warp(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Tuple = min_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : List[str] = bos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) _lowercase : Dict = eos_dist_proc(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) return scores # with processor list def run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) _lowercase : Optional[int] = processor(lowerCAmelCase_ , lowerCAmelCase_ , cur_len=lowerCAmelCase_ ) return scores _lowercase : int = jax.jit(lowerCAmelCase_ ) _lowercase : List[Any] = jax.jit(lowerCAmelCase_ ) _lowercase : Optional[int] = jitted_run_no_processor_list(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _lowercase : str = jitted_run_processor_list(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
712
from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007 def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE ) - np.asarray(SCREAMING_SNAKE_CASE )) ** 2 ) ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> VectorOut: return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ** (1 / 2) if __name__ == "__main__": def __magic_name__ ( ) -> None: from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) ) benchmark()
677
0
from __future__ import annotations def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> list[list[str]]: _lowercase : Optional[Any] = word_bank or [] # create a table _lowercase : int = len(a_ ) + 1 _lowercase : list[list[list[str]]] = [] for _ in range(a_ ): table.append([] ) # seed value _lowercase : Union[str, Any] = [[]] # because empty string has empty combination # iterate through the indices for i in range(a_ ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(a_ )] == word: _lowercase : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(a_ )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(a_ )]: combination.reverse() return table[len(a_ )] if __name__ == "__main__": print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"])) print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"])) print( all_construct( "hexagonosaurus", ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"], ) )
713
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin UpperCamelCase = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n" class lowerCAmelCase_ ( unittest.TestCase , _a ): def __a ( self ): _lowercase : str = load_tool('text-question-answering' ) self.tool.setup() _lowercase : Tuple = load_tool('text-question-answering' , remote=_A ) def __a ( self ): _lowercase : int = self.tool(_A , 'What did Hugging Face do in April 2021?' ) self.assertEqual(_A , 'launched the BigScience Research Workshop' ) def __a ( self ): _lowercase : Dict = self.remote_tool(_A , 'What did Hugging Face do in April 2021?' ) self.assertEqual(_A , 'launched the BigScience Research Workshop' ) def __a ( self ): _lowercase : Tuple = self.tool(text=_A , question='What did Hugging Face do in April 2021?' ) self.assertEqual(_A , 'launched the BigScience Research Workshop' ) def __a ( self ): _lowercase : str = self.remote_tool(text=_A , question='What did Hugging Face do in April 2021?' ) self.assertEqual(_A , 'launched the BigScience Research Workshop' )
714
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Any = VOCAB_FILES_NAMES _UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : str = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Any = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : Dict = do_lower_case _lowercase : Optional[Any] = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Tuple = normalizer_class(**_lowerCAmelCase ) _lowercase : Union[str, Any] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : str = [self.sep_token_id] _lowercase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
677
0
import argparse import os import re import packaging.version UpperCamelCase = "examples/" UpperCamelCase = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } UpperCamelCase = { "init": "src/transformers/__init__.py", "setup": "setup.py", } UpperCamelCase = "README.md" def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: with open(UpperCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowercase : List[str] = f.read() _lowercase , _lowercase : Tuple = REPLACE_PATTERNS[pattern] _lowercase : Any = replace.replace('VERSION' , UpperCAmelCase__ ) _lowercase : Tuple = re_pattern.sub(UpperCAmelCase__ , UpperCAmelCase__ ) with open(UpperCAmelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(UpperCAmelCase__ ) def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> List[Any]: for folder, directories, fnames in os.walk(UpperCAmelCase__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , UpperCAmelCase__ , pattern='examples' ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if not patch: update_version_in_examples(UpperCAmelCase__ ) def __magic_name__ ( ) -> int: _lowercase : str = '🤗 Transformers currently provides the following architectures' _lowercase : Tuple = '1. Want to contribute a new model?' with open(UpperCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f: _lowercase : Tuple = f.readlines() # Find the start of the list. _lowercase : Any = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _lowercase : Dict = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): _lowercase : Any = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(UpperCAmelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase__ ) def __magic_name__ ( ) -> Union[str, Any]: with open(REPLACE_FILES['init'] , 'r' ) as f: _lowercase : Optional[int] = f.read() _lowercase : Optional[Any] = REPLACE_PATTERNS['init'][0].search(UpperCAmelCase__ ).groups()[0] return packaging.version.parse(UpperCAmelCase__ ) def __magic_name__ ( SCREAMING_SNAKE_CASE=False ) -> Tuple: _lowercase : Any = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: _lowercase : int = default_version.base_version elif patch: _lowercase : int = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _lowercase : Optional[Any] = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _lowercase : Any = input(F"""Which version are you releasing? [{default_version}]""" ) if len(UpperCAmelCase__ ) == 0: _lowercase : Optional[int] = default_version print(F"""Updating version to {version}.""" ) global_version_update(UpperCAmelCase__ , patch=UpperCAmelCase__ ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def __magic_name__ ( ) -> Dict: _lowercase : Any = get_version() _lowercase : Any = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _lowercase : Union[str, Any] = current_version.base_version # Check with the user we got that right. _lowercase : Optional[Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(UpperCAmelCase__ ) == 0: _lowercase : List[str] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(UpperCAmelCase__ ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") UpperCamelCase = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
715
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase = logging.get_logger(__name__) class lowerCAmelCase_ ( __snake_case ): def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ): warnings.warn( 'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use YolosImageProcessor instead.' , _lowerCAmelCase , ) super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
716
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: for attribute in key.split('.' ): _lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: _lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: _lowercase : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": _lowercase : List[str] = value elif weight_type == "weight_g": _lowercase : Any = value elif weight_type == "weight_v": _lowercase : Tuple = value elif weight_type == "bias": _lowercase : List[str] = value else: _lowercase : Dict = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Optional[int] = [] _lowercase : Optional[int] = fairseq_model.state_dict() _lowercase : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): _lowercase : Dict = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) _lowercase : int = True else: for key, mapped_key in MAPPING.items(): _lowercase : Union[str, Any] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): _lowercase : Union[str, Any] = True if "*" in mapped_key: _lowercase : Dict = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] _lowercase : Dict = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: _lowercase : Optional[int] = 'weight_g' elif "weight_v" in name: _lowercase : Optional[Any] = 'weight_v' elif "weight" in name: _lowercase : str = 'weight' elif "bias" in name: _lowercase : Any = 'bias' else: _lowercase : str = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: _lowercase : Any = full_name.split('conv_layers.' )[-1] _lowercase : Any = name.split('.' ) _lowercase : Optional[Any] = int(items[0] ) _lowercase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _lowercase : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _lowercase : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) _lowercase : Union[str, Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) _lowercase : List[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[Any]: if config_path is not None: _lowercase : Optional[int] = HubertConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertConfig() if is_finetuned: if dict_path: _lowercase : List[str] = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _lowercase : Dict = target_dict.pad_index _lowercase : Dict = target_dict.bos_index _lowercase : Tuple = target_dict.eos_index _lowercase : List[Any] = len(target_dict.symbols ) _lowercase : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE ) _lowercase : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) _lowercase : str = True if config.feat_extract_norm == 'layer' else False _lowercase : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) _lowercase : Tuple = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) _lowercase : List[Any] = HubertForCTC(SCREAMING_SNAKE_CASE ) else: _lowercase : List[Any] = HubertModel(SCREAMING_SNAKE_CASE ) if is_finetuned: _lowercase , _lowercase , _lowercase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _lowercase , _lowercase , _lowercase : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) _lowercase : int = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCamelCase = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
677
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } UpperCamelCase = { """google/electra-small-generator""": 512, """google/electra-base-generator""": 512, """google/electra-large-generator""": 512, """google/electra-small-discriminator""": 512, """google/electra-base-discriminator""": 512, """google/electra-large-discriminator""": 512, } UpperCamelCase = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class lowerCAmelCase_ ( __UpperCAmelCase ): _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or normalizer_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _lowerCAmelCase ) != tokenize_chinese_chars ): _lowercase : Optional[Any] = getattr(_lowerCAmelCase , normalizer_state.pop('type' ) ) _lowercase : str = do_lower_case _lowercase : List[str] = strip_accents _lowercase : Union[str, Any] = tokenize_chinese_chars _lowercase : List[Any] = normalizer_class(**_lowerCAmelCase ) _lowercase : List[str] = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : List[Any] = [self.sep_token_id] _lowercase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Tuple = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
717
from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1_0_0_0 , ): _lowercase : List[str] = parent _lowercase : Optional[Any] = batch_size _lowercase : str = seq_length _lowercase : Dict = is_training _lowercase : Optional[int] = use_input_mask _lowercase : List[Any] = use_token_type_ids _lowercase : Union[str, Any] = use_labels _lowercase : Optional[Any] = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : str = num_hidden_layers _lowercase : Tuple = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Optional[Any] = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : Union[str, Any] = attention_probs_dropout_prob _lowercase : int = max_position_embeddings _lowercase : str = type_vocab_size _lowercase : Tuple = type_sequence_label_size _lowercase : Dict = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Dict = scope _lowercase : List[Any] = range_bbox def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowercase : List[str] = bbox[i, j, 3] _lowercase : Optional[int] = bbox[i, j, 1] _lowercase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowercase : Dict = bbox[i, j, 2] _lowercase : Dict = bbox[i, j, 0] _lowercase : int = t _lowercase : Union[str, Any] = tf.convert_to_tensor(_lowerCAmelCase ) _lowercase : Any = None if self.use_input_mask: _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowercase : Tuple = None _lowercase : Union[str, Any] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Any = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMModel(config=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[Any] = model(_lowerCAmelCase , _lowerCAmelCase , token_type_ids=_lowerCAmelCase ) _lowercase : List[str] = model(_lowerCAmelCase , _lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[Any] = TFLayoutLMForMaskedLM(config=_lowerCAmelCase ) _lowercase : Any = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : str = self.num_labels _lowercase : Tuple = TFLayoutLMForSequenceClassification(config=_lowerCAmelCase ) _lowercase : int = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Any = self.num_labels _lowercase : Optional[int] = TFLayoutLMForTokenClassification(config=_lowerCAmelCase ) _lowercase : Union[str, Any] = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering(config=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase , _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ): _lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : List[Any] = config_and_inputs _lowercase : Optional[Any] = { 'input_ids': input_ids, 'bbox': bbox, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class lowerCAmelCase_ ( __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) _UpperCamelCase : Union[str, Any] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) _UpperCamelCase : str = False _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = 10 def __a ( self ): _lowercase : Optional[int] = TFLayoutLMModelTester(self ) _lowercase : str = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) @slow def __a ( self ): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : List[Any] = TFLayoutLMModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @unittest.skip('Onnx compliancy broke with TF 2.10' ) def __a ( self ): pass def __magic_name__ ( ) -> Optional[int]: # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: # fmt: off _lowercase : Optional[Any] = tf.convert_to_tensor([[101,1_019,1_014,1_016,1_037,12_849,4_747,1_004,14_246,2_278,5_439,4_524,5_002,2_930,2_193,2_930,4_341,3_208,1_005,1_055,2_171,2_848,11_300,3_531,102],[101,4_070,4_034,7_020,1_024,3_058,1_015,1_013,2_861,1_013,6_070,19_274,2_772,6_205,27_814,16_147,16_147,4_343,2_047,10_283,10_969,14_389,1_012,2_338,102]] ) # noqa: E231 _lowercase : Tuple = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 _lowercase : Optional[int] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1_000,1_000,1_000,1_000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1_000,1_000,1_000,1_000]]] ) # noqa: E231 _lowercase : int = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) _lowercase : Union[str, Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : Tuple = TFLayoutLMModel.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[int] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Tuple = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the sequence output on [0, :3, :3] _lowercase : Optional[Any] = tf.convert_to_tensor( [[0.17_85, -0.19_47, -0.04_25], [-0.32_54, -0.28_07, 0.25_53], [-0.53_91, -0.33_22, 0.33_64]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-3 ) ) # test the pooled output on [1, :3] _lowercase : Optional[int] = tf.convert_to_tensor([-0.65_80, -0.02_14, 0.85_52] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , _lowerCAmelCase , atol=1E-3 ) ) @slow def __a ( self ): # initialize model with randomly initialized sequence classification head _lowercase : Optional[Any] = TFLayoutLMForSequenceClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=2 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Optional[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Any = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar _lowercase : List[Any] = outputs.loss _lowercase : Any = (2,) self.assertEqual(loss.shape , _lowerCAmelCase ) # test the shape of the logits _lowercase : str = outputs.logits _lowercase : Dict = (2, 2) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Dict = TFLayoutLMForTokenClassification.from_pretrained('microsoft/layoutlm-base-uncased' , num_labels=1_3 ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : str = prepare_layoutlm_batch_inputs() # forward pass _lowercase : Dict = model( input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) # test the shape of the logits _lowercase : Dict = outputs.logits _lowercase : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) ) self.assertEqual(logits.shape , _lowerCAmelCase ) @slow def __a ( self ): # initialize model with randomly initialized token classification head _lowercase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('microsoft/layoutlm-base-uncased' ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = prepare_layoutlm_batch_inputs() # forward pass _lowercase : int = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) # test the shape of the logits _lowercase : Any = tf.convert_to_tensor((2, 2_5) ) self.assertEqual(outputs.start_logits.shape , _lowerCAmelCase ) self.assertEqual(outputs.end_logits.shape , _lowerCAmelCase )
677
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } UpperCamelCase = { "google/electra-small-generator": 512, "google/electra-base-generator": 512, "google/electra-large-generator": 512, "google/electra-small-discriminator": 512, "google/electra-base-discriminator": 512, "google/electra-large-discriminator": 512, } UpperCamelCase = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __lowerCAmelCase ): _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : List[str] = ElectraTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , ) _lowercase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , lowerCamelCase__ ) != do_lower_case or normalizer_state.get('strip_accents' , lowerCamelCase__ ) != strip_accents or normalizer_state.get('handle_chinese_chars' , lowerCamelCase__ ) != tokenize_chinese_chars ): _lowercase : Dict = getattr(lowerCamelCase__ , normalizer_state.pop('type' ) ) _lowercase : Optional[Any] = do_lower_case _lowercase : int = strip_accents _lowercase : Any = tokenize_chinese_chars _lowercase : Optional[int] = normalizer_class(**lowerCamelCase__ ) _lowercase : Dict = do_lower_case def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Union[str, Any] = [self.sep_token_id] _lowercase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Any = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ )
718
import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class lowerCAmelCase_ ( unittest.TestCase ): def __a ( self ): _lowercase : List[str] = logging.get_logger() # the current default level is logging.WARNING _lowercase : Union[str, Any] = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) def __a ( self ): _lowercase : List[str] = logging.get_verbosity() _lowercase : int = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : Tuple = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(_lowerCAmelCase ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() # this action activates the env var _lowercase : List[str] = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : int = os.getenv('TRANSFORMERS_VERBOSITY' , _lowerCAmelCase ) _lowercase : Optional[Any] = logging.log_levels[env_level_str] _lowercase : Dict = logging.get_verbosity() self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , ) # restore to the original level _lowercase : Any = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def __a ( self ): # reset for the env var to take effect, next time some logger call is made transformers.utils.logging._reset_library_root_logger() _lowercase : Tuple = logging.logging.getLogger() with CaptureLogger(_lowerCAmelCase ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def __a ( self ): # testing `logger.warning_advice()` transformers.utils.logging._reset_library_root_logger() _lowercase : str = logging.get_logger('transformers.models.bart.tokenization_bart' ) _lowercase : List[str] = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(_lowerCAmelCase ) as cl: logger.warning_advice(_lowerCAmelCase ) self.assertEqual(cl.out , msg + '\n' ) def __magic_name__ ( ) -> List[str]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
677
0
# Algorithm for the pigeonhole sorting def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[int]: _lowercase : Any = min(snake_case_ ) # min() finds the minimum value _lowercase : Dict = max(snake_case_ ) # max() finds the maximum value _lowercase : Optional[int] = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size _lowercase : Optional[Any] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(snake_case_ , snake_case_ ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. _lowercase : Dict = 0 for count in range(snake_case_ ): while holes[count] > 0: holes[count] -= 1 _lowercase : List[str] = count + min_val i += 1 def __magic_name__ ( ) -> int: _lowercase : List[Any] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(snake_case_ ) print('Sorted order is:' , ' '.join(snake_case_ ) ) if __name__ == "__main__": main()
719
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): UpperCamelCase = "pt" elif is_tf_available(): UpperCamelCase = "tf" else: UpperCamelCase = "jax" class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Dict = PerceiverTokenizer _UpperCamelCase : str = False def __a ( self ): super().setUp() _lowercase : List[Any] = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __a ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def __a ( self , **_lowerCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=2_0 , _lowerCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. _lowercase : Union[str, Any] = [] for i in range(len(_lowerCAmelCase ) ): try: _lowercase : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_lowerCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) _lowercase : List[Any] = list(filter(lambda _lowerCAmelCase : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _lowerCAmelCase ) ) _lowercase : Union[str, Any] = list(filter(lambda _lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_lowerCAmelCase ) , _lowerCAmelCase ) ) if max_length is not None and len(_lowerCAmelCase ) > max_length: _lowercase : Any = toks[:max_length] if min_length is not None and len(_lowerCAmelCase ) < min_length and len(_lowerCAmelCase ) > 0: while len(_lowerCAmelCase ) < min_length: _lowercase : Optional[Any] = toks + toks # toks_str = [t[1] for t in toks] _lowercase : Optional[Any] = [t[0] for t in toks] # Ensure consistency _lowercase : Any = tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase ) if " " not in output_txt and len(_lowerCAmelCase ) > 1: _lowercase : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_lowerCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_lowerCAmelCase ) ) if with_prefix_space: _lowercase : List[Any] = ' ' + output_txt _lowercase : Dict = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) return output_txt, output_ids def __a ( self ): _lowercase : Dict = self.perceiver_tokenizer _lowercase : Optional[Any] = 'Unicode €.' _lowercase : str = tokenizer(_lowerCAmelCase ) _lowercase : int = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : List[Any] = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]Unicode €.[SEP]' ) _lowercase : Union[str, Any] = tokenizer('e è é ê ë' ) _lowercase : List[Any] = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5] self.assertEqual(encoded['input_ids'] , _lowerCAmelCase ) # decoding _lowercase : int = tokenizer.decode(_lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def __a ( self ): _lowercase : List[str] = self.perceiver_tokenizer _lowercase : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off _lowercase : Optional[int] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0] # fmt: on _lowercase : List[Any] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase ) if FRAMEWORK != "jax": _lowercase : int = list(batch.input_ids.numpy()[0] ) else: _lowercase : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual((2, 3_8) , batch.input_ids.shape ) self.assertEqual((2, 3_8) , batch.attention_mask.shape ) def __a ( self ): _lowercase : List[Any] = self.perceiver_tokenizer _lowercase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _lowercase : List[str] = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _lowerCAmelCase ) self.assertIn('attention_mask' , _lowerCAmelCase ) self.assertNotIn('decoder_input_ids' , _lowerCAmelCase ) self.assertNotIn('decoder_attention_mask' , _lowerCAmelCase ) def __a ( self ): _lowercase : Optional[int] = self.perceiver_tokenizer _lowercase : Optional[Any] = [ 'Summary of the text.', 'Another summary.', ] _lowercase : Optional[int] = tokenizer( text_target=_lowerCAmelCase , max_length=3_2 , padding='max_length' , truncation=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def __a ( self ): # safety check on max_len default value so we are sure the test works _lowercase : Tuple = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test _lowercase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : Dict = tempfile.mkdtemp() _lowercase : Tuple = ' He is very happy, UNwant\u00E9d,running' _lowercase : Union[str, Any] = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Optional[Any] = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) shutil.rmtree(_lowerCAmelCase ) _lowercase : Union[str, Any] = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc _lowercase : List[str] = tempfile.mkdtemp() _lowercase : int = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) _lowercase : Any = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) _lowercase : Tuple = tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) tokenizer.save_pretrained(_lowerCAmelCase ) _lowercase : Tuple = tokenizer.__class__.from_pretrained(_lowerCAmelCase ) _lowercase : Tuple = after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) _lowercase : List[Any] = tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(_lowerCAmelCase ) def __a ( self ): _lowercase : Optional[Any] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: _lowercase : List[str] = json.load(_lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: _lowercase : Tuple = json.load(_lowerCAmelCase ) _lowercase : Any = [F"""<extra_id_{i}>""" for i in range(1_2_5 )] _lowercase : str = added_tokens_extra_ids + [ 'an_additional_special_token' ] _lowercase : Optional[int] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) with open(os.path.join(_lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_lowerCAmelCase , _lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files _lowercase : Optional[int] = tokenizer_class.from_pretrained( _lowerCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained _lowercase : int = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_lowerCAmelCase )] _lowercase : Tuple = tokenizer_class.from_pretrained( _lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def __a ( self ): _lowercase : str = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([1_7_8] ) , '�' ) def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): pass def __a ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens _lowercase : List[str] = self.get_tokenizers(fast=_lowerCAmelCase , do_lower_case=_lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): _lowercase : Optional[Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] _lowercase : Optional[Any] = tokenizer.convert_tokens_to_string(_lowerCAmelCase ) self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
677
0
from collections import namedtuple import requests from lxml import html # type: ignore UpperCamelCase = namedtuple("covid_data", "cases deaths recovered") def __magic_name__ ( SCREAMING_SNAKE_CASE = "https://www.worldometers.info/coronavirus/" ) -> Any: _lowercase : Any = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(UpperCamelCase__ ).content ).xpath(UpperCamelCase__ ) ) UpperCamelCase = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
720
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_conditional_detr": [ "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig", "ConditionalDetrOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["ConditionalDetrFeatureExtractor"] UpperCamelCase = ["ConditionalDetrImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", "ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation", "ConditionalDetrModel", "ConditionalDetrPreTrainedModel", ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
677
0
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. UpperCamelCase = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class lowerCAmelCase_ ( unittest.TestCase ): _UpperCamelCase : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCamelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: _UpperCamelCase : List[str] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: _UpperCamelCase : str = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : Optional[int] = ZeroShotClassificationPipeline( model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , candidate_labels=['polics', 'health'] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __a ( self , _lowerCAmelCase , _lowerCAmelCase ): _lowercase : List[str] = classifier('Who are you voting for in 2020?' , candidate_labels='politics' ) self.assertEqual(_lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase )]} ) # No kwarg _lowercase : Optional[int] = classifier('Who are you voting for in 2020?' , ['politics'] ) self.assertEqual(_lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase )]} ) _lowercase : Tuple = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] ) self.assertEqual(_lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase )]} ) _lowercase : int = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' ) self.assertEqual( _lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 ) _lowercase : Any = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] ) self.assertEqual( _lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 ) _lowercase : List[str] = classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' ) self.assertEqual(_lowerCAmelCase , {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase )]} ) # https://github.com/huggingface/transformers/issues/13846 _lowercase : Optional[Any] = classifier(['I am happy'] , ['positive', 'negative'] ) self.assertEqual( _lowerCAmelCase , [ {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} for i in range(1 ) ] , ) _lowercase : Union[str, Any] = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] ) self.assertEqual( _lowerCAmelCase , [ {'sequence': ANY(_lowerCAmelCase ), 'labels': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )], 'scores': [ANY(_lowerCAmelCase ), ANY(_lowerCAmelCase )]} for i in range(2 ) ] , ) with self.assertRaises(_lowerCAmelCase ): classifier('' , candidate_labels='politics' ) with self.assertRaises(_lowerCAmelCase ): classifier(_lowerCAmelCase , candidate_labels='politics' ) with self.assertRaises(_lowerCAmelCase ): classifier('Who are you voting for in 2020?' , candidate_labels='' ) with self.assertRaises(_lowerCAmelCase ): classifier('Who are you voting for in 2020?' , candidate_labels=_lowerCAmelCase ) with self.assertRaises(_lowerCAmelCase ): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , ) with self.assertRaises(_lowerCAmelCase ): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=_lowerCAmelCase , ) self.run_entailment_id(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase ): _lowercase : Dict = zero_shot_classifier.model.config _lowercase : Union[str, Any] = config.labelaid _lowercase : Tuple = zero_shot_classifier.entailment_id _lowercase : str = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) _lowercase : Dict = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) _lowercase : int = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) _lowercase : Optional[int] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) _lowercase : str = original_labelaid self.assertEqual(_lowerCAmelCase , zero_shot_classifier.entailment_id ) @require_torch def __a ( self ): _lowercase : List[Any] = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 1_0_0 , candidate_labels=['politics', 'public health', 'science'] ) @require_torch def __a ( self ): _lowercase : Tuple = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) _lowercase : Optional[Any] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_33, 0.3_33, 0.3_33], } , ) @require_tf def __a ( self ): _lowercase : Dict = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , ) _lowercase : Any = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_33, 0.3_33, 0.3_33], } , ) @slow @require_torch def __a ( self ): _lowercase : Optional[Any] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' ) _lowercase : Optional[int] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_76, 0.0_15, 0.0_09], } , ) _lowercase : Optional[int] = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_lowerCAmelCase , ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , ) @slow @require_tf def __a ( self ): _lowercase : List[Any] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' ) _lowercase : Optional[Any] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_76, 0.0_15, 0.0_09], } , ) _lowercase : Tuple = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_lowerCAmelCase , ) self.assertEqual( nested_simplify(_lowerCAmelCase ) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18], } , )
721
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Tuple = "ClapFeatureExtractor" _UpperCamelCase : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ): super().__init__(_lowerCAmelCase , _lowerCAmelCase ) def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ): _lowercase : str = kwargs.pop('sampling_rate' , _lowerCAmelCase ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: _lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if audios is not None: _lowercase : Any = self.feature_extractor( _lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase ) if text is not None and audios is not None: _lowercase : Union[str, Any] = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase ) def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ): return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase ) @property def __a ( self ): _lowercase : Dict = self.tokenizer.model_input_names _lowercase : Any = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
677
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase = { "configuration_encodec": [ "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP", "EncodecConfig", ], "feature_extraction_encodec": ["EncodecFeatureExtractor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST", "EncodecModel", "EncodecPreTrainedModel", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
700
from __future__ import annotations from typing import Any class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : Any = num_of_nodes _lowercase : list[list[int]] = [] _lowercase : dict[int, int] = {} def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.m_edges.append([u_node, v_node, weight] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __a ( self , _lowerCAmelCase ): if self.m_component[u_node] != u_node: for k in self.m_component: _lowercase : Optional[int] = self.find_component(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if component_size[u_node] <= component_size[v_node]: _lowercase : str = v_node component_size[v_node] += component_size[u_node] self.set_component(_lowerCAmelCase ) elif component_size[u_node] >= component_size[v_node]: _lowercase : Any = self.find_component(_lowerCAmelCase ) component_size[u_node] += component_size[v_node] self.set_component(_lowerCAmelCase ) def __a ( self ): _lowercase : Any = [] _lowercase : Optional[Any] = 0 _lowercase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) _lowercase : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: _lowercase , _lowercase , _lowercase : List[str] = edge _lowercase : Union[str, Any] = self.m_component[u] _lowercase : Union[str, Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): _lowercase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _lowercase , _lowercase , _lowercase : int = edge _lowercase : Optional[int] = self.m_component[u] _lowercase : Optional[Any] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 _lowercase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __magic_name__ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
677
0
import os from datetime import datetime as dt from github import Github UpperCamelCase = [ "good first issue", "good second issue", "good difficult issue", "enhancement", "new pipeline/model", "new scheduler", "wip", ] def __magic_name__ ( ) -> List[Any]: _lowercase : Tuple = Github(os.environ['GITHUB_TOKEN'] ) _lowercase : Dict = g.get_repo('huggingface/diffusers' ) _lowercase : int = repo.get_issues(state='open' ) for issue in open_issues: _lowercase : Optional[Any] = sorted(issue.get_comments() , key=lambda SCREAMING_SNAKE_CASE : i.created_at , reverse=SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = comments[0] if len(SCREAMING_SNAKE_CASE ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
701
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Optional[Any]: _lowercase : Tuple = {} _lowercase : str = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE )['input_ids'] _lowercase : List[str] = len(example['content'] ) / len(output['input_ids'] ) return output UpperCamelCase = HfArgumentParser(PretokenizationArguments) UpperCamelCase = parser.parse_args() if args.num_workers is None: UpperCamelCase = multiprocessing.cpu_count() UpperCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCamelCase = time.time() UpperCamelCase = load_dataset(args.dataset_name, split="train") print(f'''Dataset loaded in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() UpperCamelCase = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ "repo_name", "path", "copies", "size", "content", "license", "hash", "line_mean", "line_max", "alpha_frac", "autogenerated", ], ) print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''') UpperCamelCase = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
677
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase = { "vocab_file": { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt" ), } } UpperCamelCase = { "junnyu/roformer_chinese_small": 1_536, "junnyu/roformer_chinese_base": 1_536, "junnyu/roformer_chinese_char_small": 512, "junnyu/roformer_chinese_char_base": 512, "junnyu/roformer_small_discriminator": 128, "junnyu/roformer_small_generator": 128, } UpperCamelCase = { "junnyu/roformer_chinese_small": {"do_lower_case": True}, "junnyu/roformer_chinese_base": {"do_lower_case": True}, "junnyu/roformer_chinese_char_small": {"do_lower_case": True}, "junnyu/roformer_chinese_char_base": {"do_lower_case": True}, "junnyu/roformer_small_discriminator": {"do_lower_case": True}, "junnyu/roformer_small_generator": {"do_lower_case": True}, } class lowerCAmelCase_ ( __snake_case ): _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = RoFormerTokenizer def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ): super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) _lowercase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('lowercase' , _lowerCAmelCase ) != do_lower_case or pre_tok_state.get('strip_accents' , _lowerCAmelCase ) != strip_accents ): _lowercase : Any = getattr(_lowerCAmelCase , pre_tok_state.pop('type' ) ) _lowercase : Union[str, Any] = do_lower_case _lowercase : Optional[int] = strip_accents _lowercase : int = pre_tok_class(**_lowerCAmelCase ) _lowercase : Tuple = do_lower_case def __getstate__( self ): _lowercase : Tuple = self.__dict__.copy() _lowercase : str = BertPreTokenizer() return state def __setstate__( self , _lowerCAmelCase ): _lowercase : int = d _lowercase : Any = self.__dict__['_tokenizer'].get_vocab() _lowercase : int = PreTokenizer.custom(JiebaPreTokenizer(_lowerCAmelCase ) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None ): _lowercase : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : Optional[Any] = [self.sep_token_id] _lowercase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _lowerCAmelCase , _lowerCAmelCase = None ): _lowercase : int = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , **_lowerCAmelCase , ): _lowercase : str = BertPreTokenizer() return super().save_pretrained(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase )
702
import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) UpperCamelCase = logging.getLogger(__name__) UpperCamelCase = {"facebook/bart-base": BartForConditionalGeneration} UpperCamelCase = {"facebook/bart-base": BartTokenizer} def __magic_name__ ( ) -> str: _lowercase : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=SCREAMING_SNAKE_CASE , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '--config_name' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=SCREAMING_SNAKE_CASE , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Where to store the final ONNX file.' ) _lowercase : Optional[Any] = parser.parse_args() return args def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="cpu" ) -> List[Any]: _lowercase : Dict = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ) _lowercase : int = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE ) if model_name in ["facebook/bart-base"]: _lowercase : Dict = 0 _lowercase : Optional[int] = None _lowercase : Union[str, Any] = 0 return huggingface_model, tokenizer def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: model.eval() _lowercase : List[Any] = None _lowercase : List[str] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE ) ) with torch.no_grad(): _lowercase : Optional[int] = 'My friends are cool but they eat too many carbs.' _lowercase : int = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device ) _lowercase : str = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( SCREAMING_SNAKE_CASE , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , SCREAMING_SNAKE_CASE , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=SCREAMING_SNAKE_CASE , ) logger.info('Model exported to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : str = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE ) ) logger.info('Deduplicated and optimized model written to {}'.format(SCREAMING_SNAKE_CASE ) ) _lowercase : Union[str, Any] = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE ) _lowercase : Union[str, Any] = ort_sess.run( SCREAMING_SNAKE_CASE , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(SCREAMING_SNAKE_CASE ), 'max_length': np.array(SCREAMING_SNAKE_CASE ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def __magic_name__ ( ) -> Any: _lowercase : Dict = parse_args() _lowercase : Union[str, Any] = 5 _lowercase : Union[str, Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _lowercase : Optional[Any] = torch.device(args.device ) _lowercase , _lowercase : List[Any] = load_model_tokenizer(args.model_name_or_path , SCREAMING_SNAKE_CASE ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(SCREAMING_SNAKE_CASE ) if args.max_length: _lowercase : Any = args.max_length if args.num_beams: _lowercase : List[str] = args.num_beams if args.output_file_path: _lowercase : Union[str, Any] = args.output_file_path else: _lowercase : Tuple = 'BART.onnx' logger.info('Exporting model to ONNX' ) export_and_validate_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
677
0