Dataset schema (string columns show min/max length; integer columns show min/max value):

    code                      string   87 – 55.2k
    code_codestyle            int64    0 – 349
    style_context             string   135 – 49.1k
    style_context_codestyle   int64    0 – 349
    label                     int64    0 – 1
ROMAN = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"), (100, "C"),
    (90, "XC"), (50, "L"), (40, "XL"), (10, "X"), (9, "IX"),
    (5, "V"), (4, "IV"), (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one means subtraction (e.g. IV = 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
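A quick sanity check of the two converters above (values verified by hand; assumes the definitions as reconstructed):

assert roman_to_int("MCMXCIV") == 1994
assert int_to_roman(1994) == "MCMXCIV"
assert int_to_roman(roman_to_int("XLII")) == "XLII"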
code_codestyle: 7
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
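For context, a minimal usage sketch against the public transformers package (the class name T5Config and the attribute_map behavior are as reconstructed above; assumes transformers is installed):

from transformers import T5Config

config = T5Config()  # t5-small-sized defaults
assert config.d_model == 512 and config.num_heads == 8
assert config.hidden_size == config.d_model  # attribute_map routes hidden_size to d_model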
style_context_codestyle: 7
label: 1
def palindromic_string(input_string: str) -> str:
    """Manacher's algorithm: find the longest palindromic substring in linear time."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
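A short usage check for the function above (outputs verified by hand):

assert palindromic_string("abbbaba") == "abbba"
assert palindromic_string("ababa") == "ababa"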
code_codestyle: 7
def mf_knapsack(i, wt, val, j):
    """0/1 knapsack via a memoized recursion (memory functions)."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solves the knapsack problem and also reconstructs one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # item i is in the optimal subset exactly when dp[i][j] differs from dp[i-1][j]
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
style_context_codestyle: 7
label: 1
from collections import defaultdict


class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if all persons are included, one valid arrangement is complete
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
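A hand-enumerated check of the example from the __main__ block above (10 ways to give the 3 persons distinct tasks from their allowed sets):

tasks = [[1, 3, 4], [1, 2, 5], [3, 4]]  # tasks each of the 3 persons can do
solver = AssignmentUsingBitmask(tasks, 5)
assert solver.count_no_of_ways(tasks) == 10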
code_codestyle: 7
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
style_context_codestyle: 7
label: 1
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes (marking only odd multiples); returns primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Project Euler problem 234: sum the semidivisible numbers below the limit,
    working pairwise over the squares of consecutive primes (lps/ups).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
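A quick check of the sieve helper above (the odd-only marking still yields the correct list; verified by hand):

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]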
code_codestyle: 7
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (e.g. the last hidden state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
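For reference, a hedged usage sketch of this pipeline through the public transformers API (assumes network access and the distilbert-base-uncased checkpoint; the output is a nested [batch][token][hidden] list):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test")
print(len(features[0]), len(features[0][0]))  # token count, hidden size (768 for this checkpoint)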
style_context_codestyle: 7
label: 1
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words plus normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
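A minimal usage sketch under the reconstruction above (the class name LayoutLMv3ImageProcessor is assumed from the parameter set; apply_ocr=False avoids the pytesseract dependency):

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.new("RGB", (640, 480), "white")  # synthetic blank page
encoding = processor(image, return_tensors="np")
print(encoding["pixel_values"].shape)  # (1, 3, 224, 224) with the default size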
code_codestyle: 7
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two counters on a few sample values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
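Hand-checked examples for the two counters above:

assert get_set_bits_count_using_brian_kernighans_algorithm(0b1101) == 3
assert get_set_bits_count_using_modulo_operator(255) == 8
assert get_set_bits_count_using_modulo_operator(0) == 0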
style_context_codestyle: 7
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
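The effect of the lazy pattern can be observed from the outside; a hedged sketch, assuming a transformers version that still ships Transfo-XL:

import transformers.models.transfo_xl as transfo_xl

# the package object is a _LazyModule; heavy submodules load only on first attribute access
print(type(transfo_xl).__name__)
config = transfo_xl.TransfoXLConfig()  # this triggers the real import of configuration_transfo_xl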
code_codestyle: 7
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 7
label: 1
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # the underlying model isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
code_codestyle: 7
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
style_context_codestyle: 7
label: 1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]


if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 7
def interpolation_search(sorted_collection, item):
    """Iterative interpolation search; the collection must be ascending sorted."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant of interpolation search over sorted_collection[left:right + 1]."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
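Hand-traced checks for the iterative version above (for the collection below, the first probe for 20 lands directly on index 5):

collection = [5, 10, 12, 14, 17, 20, 21]
assert interpolation_search(collection, 20) == 5
assert interpolation_search(collection, 4) is None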
style_context_codestyle: 7
label: 1
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265]  # noqa: E501
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def _snake_case(SCREAMING_SNAKE_CASE__: Tuple) -> Tuple:
    '''simple docstring'''
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def _snake_case() -> Dict:
    '''simple docstring'''
    A__ = ArgumentParser('HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=SCREAMING_SNAKE_CASE__)
    A__ = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__)
    EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__)
    TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__)
    RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__)
    DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__)
    # Parse args
    A__, A__ = parser.parse_known_args()
    if not hasattr(SCREAMING_SNAKE_CASE__, 'func'):
        parser.print_help()
        exit(1)
    A__ = parse_unknown_args(SCREAMING_SNAKE_CASE__)
    # Run
    A__ = args.func(SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__)
    service.run()


if __name__ == "__main__":
    main()
def _snake_case(SCREAMING_SNAKE_CASE__: Union[str, Any]) -> Optional[Any]:
    '''simple docstring'''
    A__ = len(SCREAMING_SNAKE_CASE__)
    while cur > 1:
        # Find the maximum number in arr
        A__ = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        A__ = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE__)]
        # Reverse whole list
        A__ = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE__)]
        cur -= 1
    return arr


if __name__ == "__main__":
    lowercase_ = input("Enter numbers separated by a comma:\n").strip()
    lowercase_ = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
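As a reading aid, a minimal de-obfuscated sketch of the pancake sort above; the name pancake_sort and the arr/cur/mi bindings are assumptions recovered from the __main__ block and comments, not part of the stored sample:

def pancake_sort(arr: list) -> list:
    """Sort a list using prefix reversals (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))        # index of the largest unsorted element
        arr = arr[mi::-1] + arr[mi + 1 :]      # flip it to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]   # flip it into its final slot
        cur -= 1
    return arr

assert pancake_sort([3, 1, 2]) == [1, 2, 3]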
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class A:
    """simple docstring"""

    def __init__(self: Union[str, Any], lowercase_: Any, lowercase_: Union[str, Any]=1_3, lowercase_: Tuple=3_0, lowercase_: List[Any]=2, lowercase_: Optional[int]=3, lowercase_: Union[str, Any]=True, lowercase_: Tuple=True, lowercase_: Any=3_2, lowercase_: List[str]=2, lowercase_: Optional[int]=4, lowercase_: Union[str, Any]=3_7, lowercase_: Tuple="gelu", lowercase_: str=0.1, lowercase_: Tuple=0.1, lowercase_: Union[str, Any]=1_0, lowercase_: int=0.02, lowercase_: List[Any]=3, lowercase_: Any=None) -> Dict:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        A__ = (image_size // patch_size) ** 2
        A__ = num_patches + 1

    def snake_case__(self: int) -> List[str]:
        '''simple docstring'''
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size], self.type_sequence_label_size)
        A__ = self.get_config()
        return config, pixel_values, labels

    def snake_case__(self: Tuple) -> List[Any]:
        '''simple docstring'''
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowercase_, initializer_range=self.initializer_range)

    def snake_case__(self: List[str], lowercase_: int, lowercase_: Union[str, Any], lowercase_: Tuple) -> Optional[Any]:
        '''simple docstring'''
        A__ = TFViTModel(config=lowercase_)
        A__ = model(lowercase_, training=lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_, interpolate_pos_encoding=lowercase_, training=lowercase_)
        A__ = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def snake_case__(self: List[Any], lowercase_: List[Any], lowercase_: List[Any], lowercase_: List[Any]) -> Dict:
        '''simple docstring'''
        A__ = self.type_sequence_label_size
        A__ = TFViTForImageClassification(lowercase_)
        A__ = model(lowercase_, labels=lowercase_, training=lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_, interpolate_pos_encoding=lowercase_, training=lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        A__ = 1
        A__ = TFViTForImageClassification(lowercase_)
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        A__ = model(lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def snake_case__(self: Any) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__, A__, A__ = config_and_inputs
        A__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class A(_UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """simple docstring"""

    lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    lowerCamelCase = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__(self: int) -> List[Any]:
        '''simple docstring'''
        A__ = TFViTModelTester(self)
        A__ = ConfigTester(self, config_class=lowercase_, has_text_modality=lowercase_, hidden_size=3_7)

    def snake_case__(self: Any) -> Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def snake_case__(self: Optional[Any]) -> str:
        '''simple docstring'''
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def snake_case__(self: Any) -> int:
        '''simple docstring'''
        pass

    def snake_case__(self: str) -> Dict:
        '''simple docstring'''
        A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_, tf.keras.layers.Layer))

    def snake_case__(self: int) -> List[str]:
        '''simple docstring'''
        A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_)
            A__ = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]
            A__ = ['pixel_values']
            self.assertListEqual(arg_names[:1], lowercase_)

    def snake_case__(self: Union[str, Any]) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_)

    def snake_case__(self: Optional[Any]) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_)

    @slow
    def snake_case__(self: Union[str, Any]) -> Union[str, Any]:
        '''simple docstring'''
        A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224')
        self.assertIsNotNone(lowercase_)


def _snake_case() -> str:
    '''simple docstring'''
    A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_tf
@require_vision
class A(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def snake_case__(self: List[Any]) -> str:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def snake_case__(self: Any) -> Dict:
        '''simple docstring'''
        A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=lowercase_, return_tensors='tf')
        # forward pass
        A__ = model(**lowercase_)
        # verify the logits
        A__ = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, lowercase_)
        A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836])
        tf.debugging.assert_near(outputs.logits[0, :3], lowercase_, atol=1E-4)
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class A:
    """simple docstring"""

    def __init__(self: str, lowercase_: Any, lowercase_: Tuple=1_3, lowercase_: str=7, lowercase_: Tuple=True, lowercase_: int=True, lowercase_: List[Any]=True, lowercase_: List[str]=True, lowercase_: List[str]=9_9, lowercase_: List[Any]=6_4, lowercase_: List[str]=5, lowercase_: Optional[Any]=4, lowercase_: Optional[Any]=3_7, lowercase_: Optional[Any]="gelu", lowercase_: int=0.1, lowercase_: str=0.1, lowercase_: Optional[Any]=5_1_2, lowercase_: int=1_6, lowercase_: List[Any]=2, lowercase_: Union[str, Any]=0.02, lowercase_: Tuple=3, lowercase_: List[Any]=4, lowercase_: str=None) -> Union[str, Any]:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_input_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = num_labels
        A__ = num_choices
        A__ = scope
        A__ = vocab_size - 1

    def snake_case__(self: str) -> Optional[Any]:
        '''simple docstring'''
        A__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        A__ = None
        if self.use_input_mask:
            A__ = random_attention_mask([self.batch_size, self.seq_length])
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        A__ = self.get_config()
        return config, input_ids, input_mask, token_labels

    def snake_case__(self: List[Any]) -> Tuple:
        '''simple docstring'''
        return GPTNeoXConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowercase_, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id)

    def snake_case__(self: Optional[int]) -> Union[str, Any]:
        '''simple docstring'''
        A__, A__, A__, A__ = self.prepare_config_and_inputs()
        A__ = True
        return config, input_ids, input_mask, token_labels

    def snake_case__(self: Any, lowercase_: List[Any], lowercase_: List[Any], lowercase_: str) -> Any:
        '''simple docstring'''
        A__ = GPTNeoXModel(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = model(lowercase_, attention_mask=lowercase_)
        A__ = model(lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case__(self: Union[str, Any], lowercase_: List[str], lowercase_: Dict, lowercase_: Optional[Any]) -> Tuple:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXModel(lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = model(lowercase_, attention_mask=lowercase_)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def snake_case__(self: Union[str, Any], lowercase_: str, lowercase_: Union[str, Any], lowercase_: Union[str, Any], lowercase_: List[str]) -> List[str]:
        '''simple docstring'''
        A__ = GPTNeoXForCausalLM(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def snake_case__(self: Optional[int], lowercase_: Optional[int], lowercase_: Optional[int], lowercase_: Dict, lowercase_: Any) -> int:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForQuestionAnswering(lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = model(lowercase_, attention_mask=lowercase_)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def snake_case__(self: List[str], lowercase_: List[str], lowercase_: int, lowercase_: Union[str, Any], lowercase_: Optional[int]) -> str:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForSequenceClassification(lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = ids_tensor([self.batch_size], self.type_sequence_label_size)
        A__ = model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def snake_case__(self: Any, lowercase_: Union[str, Any], lowercase_: List[Any], lowercase_: Optional[Any], lowercase_: int) -> Union[str, Any]:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForTokenClassification(lowercase_)
        model.to(lowercase_)
        model.eval()
        A__ = model(lowercase_, attention_mask=lowercase_, labels=lowercase_)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def snake_case__(self: int, lowercase_: str, lowercase_: int, lowercase_: Union[str, Any]) -> List[Any]:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXForCausalLM(config=lowercase_)
        model.to(lowercase_)
        model.eval()
        # first forward pass
        A__ = model(lowercase_, attention_mask=lowercase_, use_cache=lowercase_)
        A__ = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        A__ = ids_tensor((self.batch_size, 3), config.vocab_size)
        A__ = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        A__ = torch.cat([input_ids, next_tokens], dim=-1)
        A__ = torch.cat([input_mask, next_mask], dim=-1)
        A__ = model(lowercase_, attention_mask=lowercase_, output_hidden_states=lowercase_)
        A__ = output_from_no_past['hidden_states'][0]
        A__ = model(lowercase_, attention_mask=lowercase_, past_key_values=lowercase_, output_hidden_states=lowercase_)['hidden_states'][0]
        # select random slice
        A__ = ids_tensor((1,), output_from_past.shape[-1]).item()
        A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        A__ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_, lowercase_, atol=1E-3))

    def snake_case__(self: str) -> Union[str, Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__, A__, A__, A__ = config_and_inputs
        A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
    """simple docstring"""

    lowerCamelCase = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    lowerCamelCase = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__(self: str) -> Tuple:
        '''simple docstring'''
        A__ = GPTNeoXModelTester(self)
        A__ = ConfigTester(self, config_class=lowercase_, hidden_size=6_4, num_attention_heads=8)

    def snake_case__(self: Optional[Any]) -> Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def snake_case__(self: Union[str, Any]) -> Union[str, Any]:
        '''simple docstring'''
        A__, A__, A__, A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase_, lowercase_, lowercase_)

    def snake_case__(self: Dict) -> List[Any]:
        '''simple docstring'''
        A__, A__, A__, A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowercase_, lowercase_, lowercase_)

    def snake_case__(self: List[str]) -> Any:
        '''simple docstring'''
        A__, A__, A__, A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        A__ = None
        self.model_tester.create_and_check_model_as_decoder(lowercase_, lowercase_, lowercase_)

    def snake_case__(self: Optional[Any]) -> str:
        '''simple docstring'''
        A__, A__, A__, A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_, lowercase_, lowercase_)

    def snake_case__(self: Dict) -> Dict:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowercase_)

    def snake_case__(self: Tuple) -> List[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_)

    def snake_case__(self: Any) -> List[str]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_)

    def snake_case__(self: str) -> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_)

    @unittest.skip(reason='Feed forward chunking is not implemented')
    def snake_case__(self: Union[str, Any]) -> Optional[Any]:
        '''simple docstring'''
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def snake_case__(self: List[str], lowercase_: Any) -> List[str]:
        '''simple docstring'''
        A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = ids_tensor([1, 1_0], config.vocab_size)
        A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)
        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        A__ = GPTNeoXModel(lowercase_)
        original_model.to(lowercase_)
        original_model.eval()
        A__ = original_model(lowercase_).last_hidden_state
        A__ = original_model(lowercase_).last_hidden_state
        set_seed(4_2)  # Fixed seed at init time so the two models get the same random weights
        A__ = {'type': scaling_type, 'factor': 10.0}
        A__ = GPTNeoXModel(lowercase_)
        scaled_model.to(lowercase_)
        scaled_model.eval()
        A__ = scaled_model(lowercase_).last_hidden_state
        A__ = scaled_model(lowercase_).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_, lowercase_, atol=1E-5))
        else:
            self.assertFalse(torch.allclose(lowercase_, lowercase_, atol=1E-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_, lowercase_, atol=1E-5))


@require_torch
class A(unittest.TestCase):
    """simple docstring"""

    @slow
    def snake_case__(self: Tuple) -> Union[str, Any]:
        '''simple docstring'''
        A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped')
        for checkpointing in [True, False]:
            A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped')
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowercase_)
            A__ = tokenizer('My favorite food is', return_tensors='pt').to(lowercase_)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'
            A__ = model.generate(**lowercase_, do_sample=lowercase_, max_new_tokens=2_0)
            A__ = tokenizer.batch_decode(lowercase_)[0]
            self.assertEqual(lowercase_, lowercase_)
import multiprocessing
import os
from typing import BinaryIO, Optional, Union

import fsspec

from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class A(_UpperCAmelCase):
    """simple docstring"""

    def __init__(self: str, lowercase_: NestedDataStructureLike[PathLike], lowercase_: Optional[NamedSplit]=None, lowercase_: Optional[Features]=None, lowercase_: str=None, lowercase_: bool=False, lowercase_: bool=False, lowercase_: Optional[str]=None, lowercase_: Optional[int]=None, **lowercase_: int) -> Any:
        '''simple docstring'''
        super().__init__(lowercase_, split=lowercase_, features=lowercase_, cache_dir=lowercase_, keep_in_memory=lowercase_, streaming=lowercase_, num_proc=lowercase_, **lowercase_)
        A__ = field
        A__ = path_or_paths if isinstance(lowercase_, lowercase_) else {self.split: path_or_paths}
        A__ = Json(cache_dir=lowercase_, data_files=lowercase_, features=lowercase_, field=lowercase_, **lowercase_)

    def snake_case__(self: Any) -> str:
        '''simple docstring'''
        if self.streaming:
            A__ = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            A__ = None
            A__ = None
            A__ = None
            A__ = None
            self.builder.download_and_prepare(download_config=lowercase_, download_mode=lowercase_, verification_mode=lowercase_, base_path=lowercase_, num_proc=self.num_proc)
            A__ = self.builder.as_dataset(split=self.split, verification_mode=lowercase_, in_memory=self.keep_in_memory)
        return dataset


class A:
    """simple docstring"""

    def __init__(self: Tuple, lowercase_: Dataset, lowercase_: Union[PathLike, BinaryIO], lowercase_: Optional[int]=None, lowercase_: Optional[int]=None, **lowercase_: Tuple) -> Union[str, Any]:
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
        A__ = dataset
        A__ = path_or_buf
        A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        A__ = num_proc
        A__ = 'utf-8'
        A__ = to_json_kwargs

    def snake_case__(self: List[Any]) -> int:
        '''simple docstring'''
        A__ = self.to_json_kwargs.pop('path_or_buf', lowercase_)
        A__ = self.to_json_kwargs.pop('orient', 'records')
        A__ = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        A__ = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        A__ = self.to_json_kwargs.pop('compression', lowercase_)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=lowercase_) as buffer:
                A__ = self._write(file_obj=lowercase_, orient=lowercase_, lines=lowercase_, index=lowercase_, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.'
                )
            A__ = self._write(file_obj=self.path_or_buf, orient=lowercase_, lines=lowercase_, index=lowercase_, **self.to_json_kwargs)
        return written

    def snake_case__(self: List[Any], lowercase_: int) -> Dict:
        '''simple docstring'''
        A__, A__, A__, A__, A__ = args
        A__ = query_table(table=self.dataset.data, key=slice(lowercase_, offset + self.batch_size), indices=self.dataset._indices)
        A__ = batch.to_pandas().to_json(path_or_buf=lowercase_, orient=lowercase_, lines=lowercase_, index=lowercase_, **lowercase_)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def snake_case__(self: Any, lowercase_: BinaryIO, lowercase_: Optional[Any], lowercase_: Optional[int], lowercase_: Optional[Any], **lowercase_: Optional[Any]) -> int:
        '''simple docstring'''
        A__ = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format'):
                A__ = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(lowercase_)
        else:
            A__, A__ = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, lowercase_, lowercase_)]),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating json from Arrow format',
                ):
                    written += file_obj.write(lowercase_)
        return written
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A(_UpperCAmelCase):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__(self: Any, lowercase_: Optional[int]=1_0_0_0_0_0, lowercase_: Union[str, Any]=4_0_9_6, lowercase_: Dict=1_1_0_0_8, lowercase_: Dict=3_2, lowercase_: Optional[int]=3_2, lowercase_: Dict="silu", lowercase_: Union[str, Any]=2_0_4_8, lowercase_: Optional[int]=0.02, lowercase_: Dict=1E-6, lowercase_: Dict=True, lowercase_: List[Any]=0, lowercase_: Optional[int]=1, lowercase_: str=2, lowercase_: str=False, lowercase_: str=True, lowercase_: int=0.1, lowercase_: List[Any]=0.1, lowercase_: List[Any]=True, lowercase_: Union[str, Any]=True, lowercase_: Any=None, **lowercase_: List[Any]) -> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop('use_memorry_efficient_attention', lowercase_)
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()
        super().__init__(pad_token_id=lowercase_, bos_token_id=lowercase_, eos_token_id=lowercase_, tie_word_embeddings=lowercase_, **lowercase_)

    def snake_case__(self: str) -> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling, lowercase_) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                F'got {self.rope_scaling}'
            )
        A__ = self.rope_scaling.get('type', lowercase_)
        A__ = self.rope_scaling.get('factor', lowercase_)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}')
        if rope_scaling_factor is None or not isinstance(lowercase_, lowercase_) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
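For context, a small sketch of the rope_scaling contract the validator above enforces; treating the class as the Open-Llama configuration is an assumption based on its 'open-llama' model_type, and the dict literals below are illustrative only:

# Accepted: a two-field dict with type in {"linear", "dynamic"} and a float factor > 1.0
rope_scaling_ok = {"type": "linear", "factor": 2.0}
# Rejected by the validation method above (unknown scaling type):
rope_scaling_bad = {"type": "cubic", "factor": 2.0}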
lowercase_ = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

lowercase_ = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def _snake_case(SCREAMING_SNAKE_CASE__: float, SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: str) -> float:
    '''simple docstring'''
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        A__ = (
            f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'
            f'Valid values are: {", ".join(SCREAMING_SNAKE_CASE__)}'
        )
        raise ValueError(SCREAMING_SNAKE_CASE__)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
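A minimal de-obfuscated usage sketch of the converter above; the names convert_speed, speed_chart and speed_chart_inverse are assumptions recovered from the function body (the two charts share keys, so either membership test works):

def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert between km/h, m/s, mph and knots, pivoting through km/h."""
    speed_chart = {"km/h": 1.0, "m/s": 3.6, "mph": 1.609_344, "knot": 1.852}
    speed_chart_inverse = {"km/h": 1.0, "m/s": 0.277_777_778, "mph": 0.621_371_192, "knot": 0.539_956_803}
    if unit_from not in speed_chart or unit_to not in speed_chart_inverse:
        raise ValueError(f"Unknown unit: {unit_from!r} or {unit_to!r}")
    # scale to km/h first, then to the target unit
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)

assert convert_speed(100, "km/h", "m/s") == 27.778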
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def _snake_case(SCREAMING_SNAKE_CASE__: Union[str, Any]) -> Union[str, Any]:
    '''simple docstring'''
    return EnvironmentCommand()


class A(_UpperCAmelCase):
    """simple docstring"""

    @staticmethod
    def snake_case__(lowercase_: ArgumentParser) -> Dict:
        '''simple docstring'''
        A__ = parser.add_parser('env')
        download_parser.set_defaults(func=lowercase_)

    def snake_case__(self: List[Any]) -> List[str]:
        '''simple docstring'''
        A__ = huggingface_hub.__version__
        A__ = 'not installed'
        A__ = 'NA'
        if is_torch_available():
            import torch

            A__ = torch.__version__
            A__ = torch.cuda.is_available()
        A__ = 'not installed'
        if is_transformers_available():
            import transformers

            A__ = transformers.__version__
        A__ = 'not installed'
        if is_accelerate_available():
            import accelerate

            A__ = accelerate.__version__
        A__ = 'not installed'
        if is_xformers_available():
            import xformers

            A__ = xformers.__version__
        A__ = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n')
        print(self.format_dict(lowercase_))
        return info

    @staticmethod
    def snake_case__(lowercase_: int) -> Optional[Any]:
        '''simple docstring'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()]) + "\n"
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


lowercase_ = version.parse(importlib_metadata.version("nltk"))

if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize

lowercase_ = "\\n@inproceedings{banarjee2005,\n  title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author = {Banerjee, Satanjeev and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month = jun,\n  year = {2005},\n  address = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url = {https://www.aclweb.org/anthology/W05-0909},\n  pages = {65--72},\n}\n"

lowercase_ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"

lowercase_ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A(datasets.Metric):
    """simple docstring"""

    def snake_case__(self: str) -> str:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],
            reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ],
        )

    def snake_case__(self: Union[str, Any], lowercase_: Optional[int]) -> Any:
        '''simple docstring'''
        import nltk

        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')

    def snake_case__(self: int, lowercase_: str, lowercase_: Tuple, lowercase_: Any=0.9, lowercase_: Union[str, Any]=3, lowercase_: Tuple=0.5) -> Union[str, Any]:
        '''simple docstring'''
        if NLTK_VERSION >= version.Version('3.6.5'):
            A__ = [
                meteor_score.single_meteor_score(word_tokenize(lowercase_), word_tokenize(lowercase_), alpha=lowercase_, beta=lowercase_, gamma=lowercase_)
                for ref, pred in zip(lowercase_, lowercase_)
            ]
        else:
            A__ = [
                meteor_score.single_meteor_score(lowercase_, lowercase_, alpha=lowercase_, beta=lowercase_, gamma=lowercase_)
                for ref, pred in zip(lowercase_, lowercase_)
            ]
        return {"meteor": np.mean(lowercase_)}
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class A(unittest.TestCase):
    """simple docstring"""

    @slow
    def snake_case__(self: Any) -> List[str]:
        '''simple docstring'''
        A__ = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        A__ = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        A__ = torch.Size((1, 1_2, 7_6_8))  # batch_size, sequence_length, embedding_vector_dim
        A__ = torch.tensor(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            A__ = model(lowercase_)['last_hidden_state'].detach()
        self.assertEqual(output.shape, lowercase_)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], lowercase_, atol=1E-3))

    @slow
    def snake_case__(self: Any) -> int:
        '''simple docstring'''
        A__ = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        A__ = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]])
        # The dog is cute and lives in the garden house
        A__ = torch.Size((1, 1_2, 1_0_2_4))  # batch_size, sequence_length, embedding_vector_dim
        A__ = torch.tensor(
            [[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            A__ = model(lowercase_)['last_hidden_state'].detach()
        self.assertEqual(output.shape, lowercase_)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], lowercase_, atol=1E-3))
def _snake_case(SCREAMING_SNAKE_CASE__: float, SCREAMING_SNAKE_CASE__: float, SCREAMING_SNAKE_CASE__: float, SCREAMING_SNAKE_CASE__: float, SCREAMING_SNAKE_CASE__: float) -> float:
    '''simple docstring'''
    A__ = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    else:
        A__ = 1 - (matter_density + radiation_density + dark_energy)
        A__ = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        A__ = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    lowercase_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
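A short worked check of the formula above, assuming the function is hubble_parameter as the __main__ demo suggests. It computes H(z) = H0 * sqrt(Omega_r*(1+z)**4 + Omega_m*(1+z)**3 + Omega_k*(1+z)**2 + Omega_de) with Omega_k = 1 - (Omega_m + Omega_r + Omega_de), so at z = 0 the bracket sums to exactly 1 and H(0) = H0:

h0 = 68.3
omega_m, omega_r = 0.3, 1e-4
omega_de = 1 - omega_m                         # as in the demo
omega_k = 1 - (omega_m + omega_r + omega_de)   # = -1e-4 for these inputs
e_sq = omega_r + omega_m + omega_k + omega_de  # the z = 0 bracket, term by term
assert abs(h0 * e_sq ** 0.5 - 68.3) < 1e-9     # the demo prints H0 itself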
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right

lowercase_ = 128022
lowercase_ = 128028


@require_sentencepiece
class A(_UpperCAmelCase, unittest.TestCase):
    """simple docstring"""

    lowerCamelCase = MaMaaaTokenizer
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = True

    def snake_case__(self: Tuple) -> Dict:
        '''simple docstring'''
        super().setUp()
        A__ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        A__ = dict(zip(lowercase_, range(len(lowercase_))))
        A__ = Path(self.tmpdirname)
        save_json(lowercase_, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(lowercase_, save_dir / VOCAB_FILES_NAMES['spm_file'])
        A__ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def snake_case__(self: Tuple, **lowercase_: Any) -> Any:
        '''simple docstring'''
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **lowercase_)

    def snake_case__(self: Dict, lowercase_: List[Any]) -> List[str]:
        '''simple docstring'''
        return (
            "This is a test",
            "This is a test",
        )

    def snake_case__(self: Tuple) -> Optional[Any]:
        '''simple docstring'''
        A__ = '</s>'
        A__ = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_), lowercase_)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_), lowercase_)

    def snake_case__(self: Any) -> Optional[Any]:
        '''simple docstring'''
        A__ = self.get_tokenizer()
        A__ = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<s>')
        self.assertEqual(len(lowercase_), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip('Skip this test while all models are still to be uploaded.')
    def snake_case__(self: str) -> str:
        '''simple docstring'''
        pass

    def snake_case__(self: List[Any]) -> Tuple:
        '''simple docstring'''
        A__ = self.get_tokenizer()
        A__ = tokenizer.tokenize('This is a test')
        self.assertListEqual(lowercase_, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_), [2, 3, 4, 5, 6])
        A__ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(lowercase_, ['▁This', '▁is', '▁a', '▁t', 'est'])
        A__ = tokenizer.convert_tokens_to_string(lowercase_)
        self.assertEqual(lowercase_, 'This is a test')

    @slow
    def snake_case__(self: Dict) -> Union[str, Any]:
        '''simple docstring'''
        # fmt: off
        A__ = {'input_ids': [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name='facebook/m2m100_418M',
            revision='c168bae485c864188cf9aa0e4108b0b6934dc91e',
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class A(unittest.TestCase):
    """simple docstring"""

    lowerCamelCase = 'facebook/m2m100_418M'
    lowerCamelCase = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    lowerCamelCase = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    lowerCamelCase = [EN_CODE, 5_93, 19_49, 11_57_81, 4, 7_15_86, 42_34, 6_06_33, 12_62_33, 4_32, 12_38_08, 1_55_92, 11_97, 11_71_32, 12_06_18, 5, 2]
    # fmt: on

    @classmethod
    def snake_case__(cls: Optional[Any]) -> Optional[Any]:
        '''simple docstring'''
        A__ = MaMaaaTokenizer.from_pretrained(cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        A__ = 1
        return cls

    def snake_case__(self: Union[str, Any]) -> List[str]:
        '''simple docstring'''
        self.assertEqual(self.tokenizer.get_lang_id('ar'), 1_2_8_0_0_6)
        self.assertEqual(self.tokenizer.get_lang_id('en'), 1_2_8_0_2_2)
        self.assertEqual(self.tokenizer.get_lang_id('ro'), 1_2_8_0_7_6)
        self.assertEqual(self.tokenizer.get_lang_id('mr'), 1_2_8_0_6_3)

    def snake_case__(self: Any) -> Optional[int]:
        '''simple docstring'''
        A__ = self.tokenizer.get_vocab()
        self.assertEqual(len(lowercase_), self.tokenizer.vocab_size)
        self.assertEqual(vocab['<unk>'], 3)
        self.assertIn(self.tokenizer.get_lang_token('en'), lowercase_)

    def snake_case__(self: Union[str, Any]) -> int:
        '''simple docstring'''
        A__ = 'en'
        A__ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, lowercase_)

    def snake_case__(self: str) -> Tuple:
        '''simple docstring'''
        self.assertIn(lowercase_, self.tokenizer.all_special_ids)
        # fmt: off
        A__ = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
        # fmt: on
        A__ = self.tokenizer.decode(lowercase_, skip_special_tokens=lowercase_)
        A__ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowercase_)
        self.assertEqual(lowercase_, lowercase_)
        self.assertNotIn(self.tokenizer.eos_token, lowercase_)

    def snake_case__(self: List[str]) -> int:
        '''simple docstring'''
        A__ = tempfile.mkdtemp()
        A__ = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(lowercase_)
        A__ = MaMaaaTokenizer.from_pretrained(lowercase_)
        self.assertDictEqual(new_tok.lang_token_to_id, lowercase_)

    @require_torch
    def snake_case__(self: List[Any]) -> List[Any]:
        '''simple docstring'''
        A__ = 'en'
        A__ = 'fr'
        A__ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowercase_, return_tensors='pt')
        A__ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id)
        for k in batch:
            A__ = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def snake_case__(self: Optional[Any]) -> Union[str, Any]:
        '''simple docstring'''
        A__ = 'mr'
self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) A__ = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) @require_torch def snake_case__ ( self : Optional[Any] )-> List[str]: '''simple docstring''' A__ = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) A__ = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def snake_case__ ( self : Union[str, Any] )-> Any: '''simple docstring''' A__ = self.tokenizer._build_translation_inputs('A test',return_tensors='pt',src_lang='en',tgt_lang='ar' ) self.assertEqual( nested_simplify(lowercase_ ),{ # en_XX, A, test, EOS 'input_ids': [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 1_2_8_0_0_6, },)
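# Hedged usage sketch for the conventions the tests above verify: M2M100
# prefixes every encoded sequence with a language-code token and closes it
# with EOS. Assumes network access to the 'facebook/m2m100_418M' checkpoint.
from transformers import M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
enc = tok("A test", return_tensors="pt")
assert enc.input_ids[0, 0].item() == tok.get_lang_id("en")  # 128022, the EN_CODE above
assert enc.input_ids[0, -1].item() == tok.eos_token_id  # 2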
7
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Downcast every tensor in a saved state dict to fp16, in place or to ``save_path``."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
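# Minimal round-trip sketch for the converter above (file names are
# illustrative): every tensor in the saved state dict should come back float16.
import torch

dummy = {"weight": torch.randn(2, 2), "bias": torch.randn(2)}
torch.save(dummy, "pytorch_model.bin")
convert("pytorch_model.bin", save_path="pytorch_model.fp16.bin")
reloaded = torch.load("pytorch_model.fp16.bin")
assert all(v.dtype == torch.float16 for v in reloaded.values())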
7
1
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: bubble the largest element to the end, then recurse on a shorter prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
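# Quick checks for the recursive bubble sort above: each pass bubbles the
# largest remaining element to the end, then recurses on a shorter prefix
# until a pass completes with no swaps.
assert bubble_sort([5, 2, 9, 1]) == [1, 2, 5, 9]
assert bubble_sort([]) == []
assert bubble_sort([1, 2, 3]) == [1, 2, 3]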
7
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt (a word's value is the sum of its letter positions)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
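# Worked example of the word-value rule above: "SKY" -> 19 + 11 + 25 = 55,
# the tenth triangular number, so it counts as a triangle word.
assert sum(ord(c) - 64 for c in "SKY") == 55
assert 55 in TRIANGULAR_NUMBERS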
7
1
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class A : """simple docstring""" def __init__( self : Optional[Any],lowercase_ : Optional[int],lowercase_ : Optional[int]=2,lowercase_ : Union[str, Any]=8,lowercase_ : int=True,lowercase_ : Union[str, Any]=True,lowercase_ : str=True,lowercase_ : str=True,lowercase_ : int=9_9,lowercase_ : int=1_6,lowercase_ : Union[str, Any]=5,lowercase_ : List[Any]=2,lowercase_ : Optional[Any]=3_6,lowercase_ : List[Any]="gelu",lowercase_ : str=0.0,lowercase_ : List[str]=0.0,lowercase_ : str=5_1_2,lowercase_ : Optional[int]=1_6,lowercase_ : int=2,lowercase_ : Optional[int]=0.02,lowercase_ : Union[str, Any]=3,lowercase_ : Tuple=4,lowercase_ : str=None,)-> str: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) A__ = ids_tensor([self.batch_size],self.num_choices ) A__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,) def snake_case__ ( self : Union[str, Any] )-> Optional[int]: '''simple docstring''' A__ = self.get_config() A__ = 3_0_0 return config def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A__ = ids_tensor([self.batch_size, self.seq_length],vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case__ ( self : int,lowercase_ : Dict,lowercase_ : Optional[int],lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Tuple,lowercase_ : Tuple )-> str: '''simple docstring''' A__ = MraModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ ) A__ = model(lowercase_,token_type_ids=lowercase_ ) A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Dict,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Tuple,lowercase_ : Dict,lowercase_ : Optional[Any],lowercase_ : Optional[Any],lowercase_ : Dict,lowercase_ : str,lowercase_ : Dict,)-> List[str]: '''simple docstring''' A__ = True A__ = MraModel(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model( lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,encoder_hidden_states=lowercase_,encoder_attention_mask=lowercase_,) A__ = model( lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,encoder_hidden_states=lowercase_,) A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : Any,lowercase_ : int,lowercase_ : Optional[Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : str )-> Optional[int]: '''simple docstring''' A__ = MraForMaskedLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : int,lowercase_ : int,lowercase_ : Optional[Any],lowercase_ : Tuple,lowercase_ : Dict,lowercase_ : Union[str, Any],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Dict: '''simple docstring''' A__ = MraForQuestionAnswering(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model( lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,start_positions=lowercase_,end_positions=lowercase_,) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def snake_case__ ( self : Optional[Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : Any )-> Optional[int]: '''simple docstring''' A__ = self.num_labels A__ = MraForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Any,lowercase_ : Optional[int],lowercase_ : int )-> int: '''simple docstring''' A__ = self.num_labels A__ = MraForTokenClassification(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self 
: Dict,lowercase_ : List[Any],lowercase_ : int,lowercase_ : Tuple,lowercase_ : Dict,lowercase_ : Tuple,lowercase_ : str,lowercase_ : List[str] )-> List[str]: '''simple docstring''' A__ = self.num_choices A__ = MraForMultipleChoice(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() A__ = token_type_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() A__ = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous() A__ = model( lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_,labels=lowercase_,) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) ) def snake_case__ ( self : Dict )-> int: '''simple docstring''' A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = () def snake_case__ ( self : Any )-> List[Any]: '''simple docstring''' A__ = MraModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A__ = type self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : Optional[int] )-> str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Any: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @slow def snake_case__ ( self : Any )-> List[Any]: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = MraModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip(reason='MRA does not output attentions' ) def snake_case__ ( self : Tuple )-> str: '''simple docstring''' return @require_torch class A ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : int )-> str: '''simple docstring''' 
A__ = MraModel.from_pretrained('uw-madison/mra-base-512-4' ) A__ = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): A__ = model(lowercase_ )[0] A__ = torch.Size((1, 2_5_6, 7_6_8) ) self.assertEqual(output.shape,lowercase_ ) A__ = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) ) @slow def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' A__ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' ) A__ = torch.arange(2_5_6 ).unsqueeze(0 ) with torch.no_grad(): A__ = model(lowercase_ )[0] A__ = 5_0_2_6_5 A__ = torch.Size((1, 2_5_6, vocab_size) ) self.assertEqual(output.shape,lowercase_ ) A__ = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) ) @slow def snake_case__ ( self : List[str] )-> Union[str, Any]: '''simple docstring''' A__ = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' ) A__ = torch.arange(4_0_9_6 ).unsqueeze(0 ) with torch.no_grad(): A__ = model(lowercase_ )[0] A__ = 5_0_2_6_5 A__ = torch.Size((1, 4_0_9_6, vocab_size) ) self.assertEqual(output.shape,lowercase_ ) A__ = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) )
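# The @slow checks above share one pattern: run on a deterministic input and
# compare a small logits slice to hard-coded values within an absolute
# tolerance. A self-contained illustration of that comparison:
import torch

output = torch.tensor([[[-0.0140, 0.0830, -0.0381]]])
expected = torch.tensor([[[-0.0140, 0.08305, -0.0381]]])
assert torch.allclose(output, expected, atol=1e-4)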
7
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionAttendAndExcitePipeline lowerCamelCase = False lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def snake_case__ ( cls : Any )-> Optional[Any]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Dict: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,) A__ = DDIMScheduler( beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = A__ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 6_4, 6_4, 3) ) A__ = np.array( 
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def snake_case__ ( self : str )-> int: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Any )-> Optional[int]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : int )-> List[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = torch.manual_seed(5_1 ) A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa ) pipe.to('cuda' ) A__ = 'a painting of an elephant with glasses' A__ = [5, 7] A__ = pipe( prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0] A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
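# Hedged end-user sketch of the pipeline exercised above: Attend-and-Excite
# strengthens cross-attention on the listed token indices while sampling.
# Mirrors the @slow test's arguments; requires a CUDA GPU and the checkpoint.
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],  # "elephant" and "glasses" in the tokenized prompt
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.manual_seed(51),
).images[0]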
7
1
import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = DebertaTokenizer lowerCamelCase = True lowerCamelCase = DebertaTokenizerFast def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]', ] A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '[UNK]'} A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) with open(self.merges_file,'w',encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase_ ) ) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname,**lowercase_ ) def snake_case__ ( self : int,lowercase_ : List[Any] )-> Any: '''simple docstring''' A__ = 'lower newer' A__ = 'lower newer' return input_text, output_text def snake_case__ ( self : Optional[Any] )-> Dict: '''simple docstring''' A__ = self.get_tokenizer() A__ = 'lower newer' A__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A__ = tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokens + [tokenizer.unk_token] A__ = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),lowercase_ ) def snake_case__ ( self : int )-> Dict: '''simple docstring''' A__ = self.get_tokenizer() A__ = tokenizer('Hello','World' ) A__ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['token_type_ids'],lowercase_ ) @slow def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' A__ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' ) A__ = tokenizer.encode('sequence builders',add_special_tokens=lowercase_ ) A__ = tokenizer.encode('multi-sequence build',add_special_tokens=lowercase_ ) A__ = tokenizer.encode( 'sequence builders',add_special_tokens=lowercase_,add_prefix_space=lowercase_ ) A__ = tokenizer.encode( 'sequence builders','multi-sequence build',add_special_tokens=lowercase_,add_prefix_space=lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def snake_case__ ( self : str )-> List[str]: '''simple docstring''' A__ = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: A__ = tokenizer_class.from_pretrained('microsoft/deberta-base' ) A__ = [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] A__ = tokenizer(lowercase_,padding=lowercase_ ) A__ = [tokenizer.decode(lowercase_,skip_special_tokens=lowercase_ ) for seq in encoding['input_ids']] # fmt: off A__ = { 'input_ids': [ [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2] ], 'token_type_ids': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], 'attention_mask': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on A__ = [ 'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations', 'ALBERT incorporates two parameter reduction techniques', 'The first one is a factorized embedding parameterization. By decomposing the large vocabulary' ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of' ' vocabulary embedding.', ] self.assertDictEqual(encoding.data,lowercase_ ) for expected, decoded in zip(lowercase_,lowercase_ ): self.assertEqual(lowercase_,lowercase_ )
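# The '\u0120' ("Ġ") entries in the toy vocab above follow GPT-2-style
# byte-level BPE, where Ġ marks a token that begins with a space. A quick
# look using the same checkpoint as the @slow tests (network access assumed):
from transformers import DebertaTokenizer

tok = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
tokens = tok.tokenize("lower newer")
assert any(t.startswith("\u0120") for t in tokens)  # the second word carries the space marker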
7
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
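# Hedged post-export sanity check (assumes the script above wrote
# <output_path>/vae_decoder/model.onnx and that the separate `onnx` package
# is installed; the path below is illustrative).
import onnx

onnx_model = onnx.load("output/vae_decoder/model.onnx")
onnx.checker.check_model(onnx_model)
print([inp.name for inp in onnx_model.graph.input])  # expect 'latent_sample' first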
7
1
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowercase_ = "0.12" # assumed parallelism: 8 @require_flax @is_staging_test class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : int )-> Union[str, Any]: '''simple docstring''' A__ = TOKEN HfFolder.save_token(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Optional[Any]: '''simple docstring''' try: delete_repo(token=cls._token,repo_id='test-model-flax' ) except HTTPError: pass try: delete_repo(token=cls._token,repo_id='valid_org/test-model-flax-org' ) except HTTPError: pass def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' A__ = BertConfig( vocab_size=9_9,hidden_size=3_2,num_hidden_layers=5,num_attention_heads=4,intermediate_size=3_7 ) A__ = FlaxBertModel(lowercase_ ) model.push_to_hub('test-model-flax',use_auth_token=self._token ) A__ = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' ) A__ = flatten_dict(unfreeze(model.params ) ) A__ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A__ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_,1E-3,msg=F'{key} not identical' ) # Reset repo delete_repo(token=self._token,repo_id='test-model-flax' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowercase_,repo_id='test-model-flax',push_to_hub=lowercase_,use_auth_token=self._token ) A__ = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' ) A__ = flatten_dict(unfreeze(model.params ) ) A__ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A__ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_,1E-3,msg=F'{key} not identical' ) def snake_case__ ( self : Any )-> List[Any]: '''simple docstring''' A__ = BertConfig( vocab_size=9_9,hidden_size=3_2,num_hidden_layers=5,num_attention_heads=4,intermediate_size=3_7 ) A__ = FlaxBertModel(lowercase_ ) model.push_to_hub('valid_org/test-model-flax-org',use_auth_token=self._token ) A__ = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' ) A__ = flatten_dict(unfreeze(model.params ) ) A__ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A__ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_,1E-3,msg=F'{key} not identical' ) # Reset repo delete_repo(token=self._token,repo_id='valid_org/test-model-flax-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( lowercase_,repo_id='valid_org/test-model-flax-org',push_to_hub=lowercase_,use_auth_token=self._token ) A__ = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' ) A__ = flatten_dict(unfreeze(model.params ) ) A__ = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): A__ = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowercase_,1E-3,msg=F'{key} not identical' ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: '''simple docstring''' A__ = True A__ = flatten_dict(modela.params ) A__ 
= flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: A__ = False return models_are_equal @require_flax class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : int )-> Tuple: '''simple docstring''' A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' ) A__ = FlaxBertModel(lowercase_ ) A__ = 'bert' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase_,lowercase_ ) ) with self.assertRaises(lowercase_ ): A__ = FlaxBertModel.from_pretrained(lowercase_ ) A__ = FlaxBertModel.from_pretrained(lowercase_,subfolder=lowercase_ ) self.assertTrue(check_models_equal(lowercase_,lowercase_ ) ) def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' ) A__ = FlaxBertModel(lowercase_ ) A__ = 'bert' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowercase_,lowercase_ ),max_shard_size='10KB' ) with self.assertRaises(lowercase_ ): A__ = FlaxBertModel.from_pretrained(lowercase_ ) A__ = FlaxBertModel.from_pretrained(lowercase_,subfolder=lowercase_ ) self.assertTrue(check_models_equal(lowercase_,lowercase_ ) ) def snake_case__ ( self : Optional[Any] )-> Dict: '''simple docstring''' A__ = 'bert' A__ = 'hf-internal-testing/tiny-random-bert-subfolder' with self.assertRaises(lowercase_ ): A__ = FlaxBertModel.from_pretrained(lowercase_ ) A__ = FlaxBertModel.from_pretrained(lowercase_,subfolder=lowercase_ ) self.assertIsNotNone(lowercase_ ) def snake_case__ ( self : Optional[int] )-> int: '''simple docstring''' A__ = 'bert' A__ = 'hf-internal-testing/tiny-random-bert-sharded-subfolder' with self.assertRaises(lowercase_ ): A__ = FlaxBertModel.from_pretrained(lowercase_ ) A__ = FlaxBertModel.from_pretrained(lowercase_,subfolder=lowercase_ ) self.assertIsNotNone(lowercase_ )
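# The hub tests above compare checkpoints parameter-by-parameter after a
# push/pull round trip. The same flatten-and-diff pattern, sketched on one
# tiny randomly initialized Flax model against itself:
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import BertConfig, FlaxBertModel

model = FlaxBertModel(
    BertConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
)
params = flatten_dict(unfreeze(model.params))
assert all(float((v - v).sum()) == 0.0 for v in params.values())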
7
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
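# A minimal sketch of the scheduler-swapping pattern the tests above rely on:
# DPM-family schedulers can be rebuilt from one another's configs without loss.
from diffusers import DPMSolverSinglestepScheduler, UniPCMultistepScheduler

base = DPMSolverSinglestepScheduler(num_train_timesteps=1000, beta_schedule="linear")
swapped = UniPCMultistepScheduler.from_config(base.config)
restored = DPMSolverSinglestepScheduler.from_config(swapped.config)
assert restored.config.num_train_timesteps == 1000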
7
1
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy lowercase_ = logging.getLogger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ : torch.nn.Module , SCREAMING_SNAKE_CASE__ : BnbQuantizationConfig , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Union[int, str, torch.device]]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[Dict[Union[int, str], Union[int, str]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE__ : bool = False , ) -> List[str]: '''simple docstring''' A__ = bnb_quantization_config.load_in_abit A__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( 'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,' ' make sure you have the latest version of `bitsandbytes` installed.' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( 'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,' 'make sure you have the latest version of `bitsandbytes` installed.' ) A__ = [] # custom device map if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(device_map.keys() ) > 1: A__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A__ = get_keys_to_not_convert(SCREAMING_SNAKE_CASE__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(SCREAMING_SNAKE_CASE__ ) A__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A__ = [] A__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(SCREAMING_SNAKE_CASE__ ) # compatibility with peft A__ = load_in_abit A__ = load_in_abit A__ = get_parameter_device(SCREAMING_SNAKE_CASE__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( 'It is not recommended to quantize a loaded model. ' 'The model should be instantiated under the `init_empty_weights` context manager.' 
) A__ = replace_with_bnb_layers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) # convert param to the right dtype A__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A__ = name.replace('.weight' , '' ).replace('.bias' , '' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(SCREAMING_SNAKE_CASE__ ): param.to(SCREAMING_SNAKE_CASE__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info( f'The model device type is {model_device.type}. However, cuda is needed for quantization.' 'We move the model to cuda.' ) return model elif weights_location is None: raise RuntimeError( f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): A__ = replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , modules_to_not_convert=SCREAMING_SNAKE_CASE__ ) A__ = get_quantized_model_device_map( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , max_memory=SCREAMING_SNAKE_CASE__ , no_split_module_classes=SCREAMING_SNAKE_CASE__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A__ = True A__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] ) load_checkpoint_in_model( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=SCREAMING_SNAKE_CASE__ , offload_state_dict=SCREAMING_SNAKE_CASE__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , offload_dir=SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : str=None ) -> List[str]: '''simple docstring''' if device_map is None: if torch.cuda.is_available(): A__ = {'': torch.cuda.current_device()} else: raise RuntimeError('No GPU found. A GPU is needed for quantization.' ) logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( 'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ' '\'sequential\'.' 
) A__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A__ = {} A__ = special_dtypes A__ = no_split_module_classes A__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": A__ = get_balanced_memory( SCREAMING_SNAKE_CASE__ , low_zero=(device_map == 'balanced_low_0') , max_memory=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) A__ = max_memory A__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # check if don't have any quantized module on the cpu A__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' ) else: logger.info( 'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' ) del device_map_without_some_modules return device_map def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None ) -> Union[str, Any]: '''simple docstring''' if modules_to_not_convert is None: A__ = [] A__ , A__ = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not has_been_replaced: logger.warning( 'You are loading your model in 8bit or 4bit but no linear modules were found in your model.' ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.' ' Please double check your model architecture, or submit an issue on github if you think this is' ' a bug.' ) return model def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Union[str, Any]: '''simple docstring''' A__ = False for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A__ = '.'.join(SCREAMING_SNAKE_CASE__ ) A__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: A__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=SCREAMING_SNAKE_CASE__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: A__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' ) A__ = module.weight.data if module.bias is not None: A__ = module.bias.data bnb_module.requires_grad_(SCREAMING_SNAKE_CASE__ ) setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = True if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_layers( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> str: '''simple docstring''' with init_empty_weights(): A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A__ = find_tied_parameters(SCREAMING_SNAKE_CASE__ ) # For compatibility with Accelerate < 0.18 if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(SCREAMING_SNAKE_CASE__ , [] ) A__ = len(SCREAMING_SNAKE_CASE__ ) > 0 # Check if it is a base model A__ = False if hasattr(SCREAMING_SNAKE_CASE__ , 'base_model_prefix' ): A__ = not hasattr(SCREAMING_SNAKE_CASE__ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) A__ = list(set(SCREAMING_SNAKE_CASE__ ) ) + list(SCREAMING_SNAKE_CASE__ ) # remove ".weight" from the keys A__ = ['.weight', '.bias'] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(SCREAMING_SNAKE_CASE__ , '' ) filtered_module_names.append(SCREAMING_SNAKE_CASE__ ) return filtered_module_names def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> str: '''simple docstring''' for m in model.modules(): if isinstance(SCREAMING_SNAKE_CASE__ , bnb.nn.Linearabit ): return True return False def _snake_case( SCREAMING_SNAKE_CASE__ : nn.Module ) -> Union[str, Any]: '''simple docstring''' return next(parameter.parameters() ).device def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]: '''simple docstring''' if fpaa_statistics is None: set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 0 , dtype=SCREAMING_SNAKE_CASE__ , value=SCREAMING_SNAKE_CASE__ ) A__ = param_name A__ = model if "." in tensor_name: A__ = tensor_name.split('.' ) for split in splits[:-1]: A__ = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if new_module is None: raise ValueError(f'{module} has no attribute {split}.' ) A__ = new_module A__ = splits[-1] # offload weights A__ = False offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) if hasattr(module._parameters[tensor_name] , 'SCB' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ , ) else: offload_weight(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) offload_weight(SCREAMING_SNAKE_CASE__ , param_name.replace('weight' , 'SCB' ) , SCREAMING_SNAKE_CASE__ , index=SCREAMING_SNAKE_CASE__ ) set_module_tensor_to_device(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'meta' , dtype=SCREAMING_SNAKE_CASE__ , value=torch.empty(*param.size() ) )
7
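# A minimal illustrative sketch of the traversal the replacement helper above
# performs: it recursively walks model.named_children() and swaps nn.Linear
# modules for bitsandbytes layers unless their key is excluded. The traversal
# can be shown without bitsandbytes by merely *locating* the nn.Linear modules
# it would convert; the toy model and function name below are invented.
import torch.nn as nn

def find_linear_layers(model, modules_to_not_convert=(), prefix=""):
    # Return dotted names of nn.Linear children, honouring the skip list.
    found = []
    for name, module in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            found.append(full_name)
        found.extend(find_linear_layers(module, modules_to_not_convert, full_name))
    return found

toy = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Sequential(nn.Linear(8, 2)))
print(find_linear_layers(toy))  # ['0', '2.0']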
class A :
    """simple docstring"""

    def __init__( self : Any,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : List[str] )-> List[Any]:
        '''simple docstring'''
        A__ = name
        A__ = value
        A__ = weight

    def __repr__( self : int )-> Tuple:
        '''simple docstring'''
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def snake_case__ ( self : Any )-> str:
        '''simple docstring'''
        return self.value

    def snake_case__ ( self : Any )-> Tuple:
        '''simple docstring'''
        return self.name

    def snake_case__ ( self : Any )-> Dict:
        '''simple docstring'''
        return self.weight

    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        return self.value / self.weight


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
    '''simple docstring'''
    A__ = []
    for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
    '''simple docstring'''
    A__ = sorted(SCREAMING_SNAKE_CASE__ , key=SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ )
    A__ = []
    A__ , A__ = 0.0, 0.0
    for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def _snake_case( ) -> Any:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
7
1
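# A de-obfuscated sketch of the greedy selection above: sort by a caller-
# supplied key, then take items while they still fit the budget. The food
# names and numbers below are invented for the demo.
def greedy(items, max_cost, key):
    chosen, cost = [], 0.0
    for name, value, weight in sorted(items, key=key, reverse=True):
        if cost + weight <= max_cost:
            chosen.append(name)
            cost += weight
    return chosen

foods = [("Burger", 80, 40), ("Pizza", 100, 60), ("Coca Cola", 60, 10)]
# Prefer the best value-per-weight ratio first.
print(greedy(foods, max_cost=60, key=lambda t: t[1] / t[2]))  # ['Coca Cola', 'Burger']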
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa


class A ( ctypes.Structure ):
    """simple docstring"""

    lowerCamelCase = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]


def _snake_case( ) -> Optional[int]:
    '''simple docstring'''
    if os.name == "nt":
        A__ = CursorInfo()
        A__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
        A__ = False
        ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25l' )
        sys.stdout.flush()


def _snake_case( ) -> Optional[int]:
    '''simple docstring'''
    if os.name == "nt":
        A__ = CursorInfo()
        A__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
        ctypes.windll.kernelaa.GetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
        A__ = True
        ctypes.windll.kernelaa.SetConsoleCursorInfo(SCREAMING_SNAKE_CASE__ , ctypes.byref(SCREAMING_SNAKE_CASE__ ) )
    elif os.name == "posix":
        sys.stdout.write('\033[?25h' )
        sys.stdout.flush()


@contextmanager
def _snake_case( ) -> Union[str, Any]:
    '''simple docstring'''
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
7
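# Standalone sketch of the POSIX branch of the cursor helpers above: the ANSI
# sequences \033[?25l and \033[?25h hide and show the terminal cursor, and a
# context manager guarantees the cursor is restored even if the body raises.
import sys
import time
from contextlib import contextmanager

@contextmanager
def hidden_cursor():
    sys.stdout.write("\033[?25l")  # hide
    sys.stdout.flush()
    try:
        yield
    finally:
        sys.stdout.write("\033[?25h")  # show again, no matter what happened
        sys.stdout.flush()

with hidden_cursor():
    time.sleep(0.1)  # e.g. render a progress bar here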
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class A ( _UpperCAmelCase , _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'resnet'
    lowerCamelCase = ['basic', 'bottleneck']

    def __init__( self : Optional[Any],lowercase_ : int=3,lowercase_ : List[str]=6_4,lowercase_ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8],lowercase_ : Tuple=[3, 4, 6, 3],lowercase_ : Union[str, Any]="bottleneck",lowercase_ : List[str]="relu",lowercase_ : Tuple=False,lowercase_ : List[str]=None,lowercase_ : List[Any]=None,**lowercase_ : str,)-> Optional[Any]:
        '''simple docstring'''
        super().__init__(**lowercase_ )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        A__ = num_channels
        A__ = embedding_size
        A__ = hidden_sizes
        A__ = depths
        A__ = layer_type
        A__ = hidden_act
        A__ = downsample_in_first_stage
        A__ = ['stem'] + [F'stage{idx}' for idx in range(1,len(lowercase_ ) + 1 )]
        A__ , A__ = get_aligned_output_features_output_indices(
            out_features=lowercase_,out_indices=lowercase_,stage_names=self.stage_names )


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = version.parse('1.11' )

    @property
    def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def snake_case__ ( self : Any )-> float:
        '''simple docstring'''
        return 1E-3
7
1
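# The stage bookkeeping in the ResNet config above, reduced to plain Python:
# stage names follow from the number of depths, and requested out_features map
# to indices into that list. This only mirrors the intent of
# get_aligned_output_features_output_indices; the real helper also validates
# and fills in defaults.
depths = [3, 4, 6, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
out_features = ["stage2", "stage4"]  # example choice, not a library default
out_indices = [stage_names.index(name) for name in out_features]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(out_indices)  # [2, 4]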
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(_UpperCAmelCase ) , 'Tatoeba directory does not exist.' )
class A ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def snake_case__ ( self : str )-> Union[str, Any]:
        '''simple docstring'''
        A__ = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=lowercase_ )

    @slow
    def snake_case__ ( self : List[str] )-> Optional[int]:
        '''simple docstring'''
        self.resolver.convert_models(['heb-eng'] )

    @slow
    def snake_case__ ( self : int )-> Optional[int]:
        '''simple docstring'''
        A__ , A__ = self.resolver.write_model_card('opus-mt-he-en',dry_run=lowercase_ )
        assert mmeta["long_pair"] == "heb-eng"
7
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 't5' lowerCamelCase = ['past_key_values'] lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Union[str, Any],lowercase_ : int=3_2_1_2_8,lowercase_ : int=5_1_2,lowercase_ : List[str]=6_4,lowercase_ : Tuple=2_0_4_8,lowercase_ : Any=6,lowercase_ : List[str]=None,lowercase_ : Union[str, Any]=8,lowercase_ : int=3_2,lowercase_ : Dict=1_2_8,lowercase_ : Optional[int]=0.1,lowercase_ : List[str]=1E-6,lowercase_ : Tuple=1.0,lowercase_ : Any="relu",lowercase_ : Union[str, Any]=True,lowercase_ : Optional[Any]=True,lowercase_ : int=0,lowercase_ : str=1,**lowercase_ : str,)-> Any: '''simple docstring''' A__ = vocab_size A__ = d_model A__ = d_kv A__ = d_ff A__ = num_layers A__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry A__ = num_heads A__ = relative_attention_num_buckets A__ = relative_attention_max_distance A__ = dropout_rate A__ = layer_norm_epsilon A__ = initializer_factor A__ = feed_forward_proj A__ = use_cache A__ = self.feed_forward_proj.split('-' ) A__ = act_info[-1] A__ = act_info[0] == 'gated' if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": A__ = 'gelu_new' super().__init__( pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,) class A ( _UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self : Tuple )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' A__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: A__ = 'past_encoder_sequence + sequence' A__ = {0: 'batch'} A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: A__ = {0: 'batch', 1: 'decoder_sequence'} A__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_,direction='inputs' ) return common_inputs @property def snake_case__ ( self : Any )-> int: '''simple docstring''' return 1_3
7
1
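# The feed_forward_proj parsing from the T5 config above, written out plainly
# (slightly simplified at the edges; the function name below is invented):
def parse_feed_forward_proj(value):
    parts = value.split("-")
    if (len(parts) > 1 and parts[0] != "gated") or len(parts) > 2:
        raise ValueError(f"{value!r} is not a valid feed_forward_proj")
    act = parts[-1]
    is_gated = parts[0] == "gated" and len(parts) == 2
    if value == "gated-gelu":  # backwards-compatibility quirk kept from the config
        act = "gelu_new"
    return act, is_gated

print(parse_feed_forward_proj("relu"))        # ('relu', False)
print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)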
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class A : """simple docstring""" lowerCamelCase = LEDConfig lowerCamelCase = {} lowerCamelCase = 'gelu' def __init__( self : str,lowercase_ : Any,lowercase_ : int=1_3,lowercase_ : Any=7,lowercase_ : List[str]=True,lowercase_ : Tuple=False,lowercase_ : List[Any]=9_9,lowercase_ : Dict=3_2,lowercase_ : Tuple=2,lowercase_ : str=4,lowercase_ : Optional[int]=3_7,lowercase_ : Optional[int]=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[str]=2_0,lowercase_ : List[str]=2,lowercase_ : Tuple=1,lowercase_ : List[Any]=0,lowercase_ : int=4,)-> Dict: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id A__ = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A__ = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A__ = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def snake_case__ ( self : Dict )-> str: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size ) A__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ),1 ) A__ = tf.concat([input_ids, eos_tensor],axis=1 ) A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = self.config_cls( vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_ids=[2],bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,decoder_start_token_id=self.pad_token_id,attention_window=self.attention_window,**self.config_updates,) A__ = prepare_led_inputs_dict(lowercase_,lowercase_,lowercase_ ) A__ = tf.concat( [tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]],axis=-1,) A__ = global_attention_mask return config, inputs_dict def snake_case__ ( self : Dict,lowercase_ : Optional[Any],lowercase_ : Any )-> int: '''simple docstring''' A__ = TFLEDModel(config=lowercase_ ).get_decoder() A__ = inputs_dict['input_ids'] A__ = input_ids[:1, :] A__ = inputs_dict['attention_mask'][:1, :] A__ = 1 # first forward pass A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ ) A__ , A__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3),config.vocab_size ) A__ = tf.cast(ids_tensor((self.batch_size, 3),2 ),tf.inta ) # append to next input_ids and A__ = tf.concat([input_ids, next_tokens],axis=-1 ) A__ = tf.concat([attention_mask, next_attn_mask],axis=-1 ) A__ = model(lowercase_,attention_mask=lowercase_ )[0] A__ = model(lowercase_,attention_mask=lowercase_,past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1],output_from_past.shape[1] ) # select random slice A__ = int(ids_tensor((1,),output_from_past.shape[-1] ) ) A__ = output_from_no_past[:, -3:, random_slice_idx] A__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_,lowercase_,rtol=1E-3 ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Any: '''simple docstring''' if attention_mask is None: A__ = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: A__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: A__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": 
decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase = True lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = TFLEDModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_ ) def snake_case__ ( self : Dict )-> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def snake_case__ ( self : List[Any] )-> Union[str, Any]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = tf.zeros_like(inputs_dict['attention_mask'] ) A__ = 2 A__ = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices,1,inputs_dict['global_attention_mask'],) A__ = True A__ = self.model_tester.seq_length A__ = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase_ : int ): A__ = outputs.decoder_attentions self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, seq_length, seq_length],) def check_encoder_attentions_output(lowercase_ : List[str] ): A__ = [t.numpy() for t in outputs.encoder_attentions] A__ = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase_ ),self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, seq_length, seq_length],) self.assertListEqual( list(global_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],) for model_class in self.all_model_classes: A__ = True A__ = False A__ = False A__ = model_class(lowercase_ ) A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) ) A__ = len(lowercase_ ) self.assertEqual(config.output_hidden_states,lowercase_ ) check_encoder_attentions_output(lowercase_ ) if self.is_encoder_decoder: A__ = model_class(lowercase_ ) A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) ) self.assertEqual(config.output_hidden_states,lowercase_ ) check_decoder_attentions_output(lowercase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A__ = True A__ = model_class(lowercase_ ) A__ = model(self._prepare_for_class(lowercase_,lowercase_ ) ) self.assertEqual(config.output_hidden_states,lowercase_ ) check_encoder_attentions_output(lowercase_ ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(lowercase_ ) A__ = 
model(self._prepare_for_class(lowercase_,lowercase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(lowercase_ ) ) self.assertEqual(model.config.output_hidden_states,lowercase_ ) check_encoder_attentions_output(lowercase_ ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' ) def snake_case__ ( self : Optional[int] )-> Dict: '''simple docstring''' pass def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' pass def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> Any: '''simple docstring''' return tf.constant(SCREAMING_SNAKE_CASE__ , dtype=tf.intaa ) lowercase_ = 1e-4 @slow @require_tf class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here A__ = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A__ = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A__ = prepare_led_inputs_dict(model.config,lowercase_,lowercase_ ) A__ = model(**lowercase_ )[0] A__ = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape,lowercase_ ) # change to expected output here A__ = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],) tf.debugging.assert_near(output[:, :3, :3],lowercase_,atol=1E-3 ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' A__ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here A__ = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A__ = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A__ = prepare_led_inputs_dict(model.config,lowercase_,lowercase_ ) A__ = model(**lowercase_ )[0] A__ = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape,lowercase_ ) # change to expected output here A__ = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],) tf.debugging.assert_near(output[:, :3, :3],lowercase_,atol=1E-3,rtol=1E-3 )
7
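# The encoder length computed in the LED tester above, spelled out: inputs are
# padded up to the next multiple of the attention window, so the padded length
# adds whatever remainder is missing (and nothing when already aligned).
def padded_encoder_length(seq_length, attention_window):
    return seq_length + (attention_window - seq_length % attention_window) % attention_window

print(padded_encoder_length(7, 6))   # 12 -> next multiple of 6
print(padded_encoder_length(12, 6))  # 12 -> already aligned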
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            A__ = mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        else:
            A__ = max(
                mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ,
                mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - wt[i - 1] ) + val[i - 1] ,
            )
        A__ = val
    return f[i][j]


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
    '''simple docstring'''
    A__ = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                A__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                A__ = dp[i - 1][w_]
    return dp[n][w_], dp


def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ) -> Union[str, Any]:
    '''simple docstring'''
    if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    A__ = len(SCREAMING_SNAKE_CASE__ )
    if num_items != len(SCREAMING_SNAKE_CASE__ ):
        A__ = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(SCREAMING_SNAKE_CASE__ )} values'
        )
        raise ValueError(SCREAMING_SNAKE_CASE__ )
    for i in range(SCREAMING_SNAKE_CASE__ ):
        if not isinstance(wt[i] , SCREAMING_SNAKE_CASE__ ):
            A__ = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i] )} at index {i}'
            )
            raise TypeError(SCREAMING_SNAKE_CASE__ )
    A__ , A__ = knapsack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    A__ = set()
    _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return optimal_val, example_optional_set


def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : set ) -> Optional[int]:
    '''simple docstring'''
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        else:
            optimal_set.add(SCREAMING_SNAKE_CASE__ )
            _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , j - wt[i - 1] , SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    lowercase_ = [3, 2, 4, 4]
    lowercase_ = [4, 3, 2, 3]
    lowercase_ = 4
    lowercase_ = 6
    lowercase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    lowercase_ , lowercase_ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    lowercase_ , lowercase_ = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
7
1
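# A compact, de-obfuscated restatement of the 0/1 knapsack recurrence above,
# using the data from its __main__ block (values [3, 2, 4, 4], weights
# [4, 3, 2, 3], capacity 6): dp[i][w] is the best value over the first i items
# within capacity w.
val, wt, cap = [3, 2, 4, 4], [4, 3, 2, 3], 6
n = len(wt)
dp = [[0] * (cap + 1) for _ in range(n + 1)]
for i in range(1, n + 1):
    for w in range(1, cap + 1):
        dp[i][w] = dp[i - 1][w]            # skip item i
        if wt[i - 1] <= w:                 # or take it, if it fits
            dp[i][w] = max(dp[i][w], dp[i - 1][w - wt[i - 1]] + val[i - 1])
print(dp[n][cap])  # 8, matching the assertion in the __main__ block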
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowercase_ = "pt" elif is_tf_available(): lowercase_ = "tf" else: lowercase_ = "jax" class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ByTaTokenizer lowerCamelCase = False def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' super().setUp() A__ = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' return ByTaTokenizer.from_pretrained('google/byt5-small' ) def snake_case__ ( self : List[Any],**lowercase_ : Tuple )-> ByTaTokenizer: '''simple docstring''' return self.tokenizer_class.from_pretrained(self.tmpdirname,**lowercase_ ) def snake_case__ ( self : Tuple,lowercase_ : List[str],lowercase_ : List[Any]=False,lowercase_ : Optional[int]=2_0,lowercase_ : Dict=5 )-> Tuple[str, list]: '''simple docstring''' A__ = [] for i in range(len(lowercase_ ) ): try: A__ = tokenizer.decode([i],clean_up_tokenization_spaces=lowercase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) A__ = list(filter(lambda lowercase_ : re.match(r'^[ a-zA-Z]+$',t[1] ),lowercase_ ) ) A__ = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1],add_special_tokens=lowercase_ ),lowercase_ ) ) if max_length is not None and len(lowercase_ ) > max_length: A__ = toks[:max_length] if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0: while len(lowercase_ ) < min_length: A__ = toks + toks # toks_str = [t[1] for t in toks] A__ = [t[0] for t in toks] # Ensure consistency A__ = tokenizer.decode(lowercase_,clean_up_tokenization_spaces=lowercase_ ) if " " not in output_txt and len(lowercase_ ) > 1: A__ = ( tokenizer.decode([toks_ids[0]],clean_up_tokenization_spaces=lowercase_ ) + ' ' + tokenizer.decode(toks_ids[1:],clean_up_tokenization_spaces=lowercase_ ) ) if with_prefix_space: A__ = ' ' + output_txt A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) return output_txt, output_ids def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) A__ = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'],batch_without_eos_added['input_ids'] ) def snake_case__ ( self : Union[str, Any] )-> Any: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = 'Unicode €.' 
A__ = tokenizer(lowercase_ ) A__ = [8_8, 1_1_3, 1_0_8, 1_0_2, 1_1_4, 1_0_3, 1_0_4, 3_5, 2_2_9, 1_3_3, 1_7_5, 4_9, 1] self.assertEqual(encoded['input_ids'],lowercase_ ) # decoding A__ = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_,'Unicode €.</s>' ) A__ = tokenizer('e è é ê ë' ) A__ = [1_0_4, 3_5, 1_9_8, 1_7_1, 3_5, 1_9_8, 1_7_2, 3_5, 1_9_8, 1_7_3, 3_5, 1_9_8, 1_7_4, 1] self.assertEqual(encoded['input_ids'],lowercase_ ) # decoding A__ = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_,'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ),'e è é ê ë</s>' ) def snake_case__ ( self : str )-> Dict: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off A__ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 1, 0] # fmt: on A__ = tokenizer(lowercase_,padding=lowercase_,return_tensors=lowercase_ ) self.assertIsInstance(lowercase_,lowercase_ ) if FRAMEWORK != "jax": A__ = list(batch.input_ids.numpy()[0] ) else: A__ = list(batch.input_ids.tolist()[0] ) self.assertListEqual(lowercase_,lowercase_ ) self.assertEqual((2, 3_7),batch.input_ids.shape ) self.assertEqual((2, 3_7),batch.attention_mask.shape ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] A__ = tokenizer(lowercase_,padding=lowercase_,return_tensors=lowercase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids',lowercase_ ) self.assertIn('attention_mask',lowercase_ ) self.assertNotIn('decoder_input_ids',lowercase_ ) self.assertNotIn('decoder_attention_mask',lowercase_ ) def snake_case__ ( self : Optional[Any] )-> List[str]: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = [ 'Summary of the text.', 'Another summary.', ] A__ = tokenizer( text_target=lowercase_,max_length=3_2,padding='max_length',truncation=lowercase_,return_tensors=lowercase_ ) self.assertEqual(3_2,targets['input_ids'].shape[1] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' A__ = self.ta_base_tokenizer A__ = ['A long paragraph for summarization. </s>'] A__ = ['Summary of the text. 
</s>'] # fmt: off A__ = [6_8, 3_5, 1_1_1, 1_1_4, 1_1_3, 1_0_6, 3_5, 1_1_5, 1_0_0, 1_1_7, 1_0_0, 1_0_6, 1_1_7, 1_0_0, 1_1_5, 1_0_7, 3_5, 1_0_5, 1_1_4, 1_1_7, 3_5, 1_1_8, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_0_8, 1_2_5, 1_0_0, 1_1_9, 1_0_8, 1_1_4, 1_1_3, 4_9, 3_5, 1] A__ = [8_6, 1_2_0, 1_1_2, 1_1_2, 1_0_0, 1_1_7, 1_2_4, 3_5, 1_1_4, 1_0_5, 3_5, 1_1_9, 1_0_7, 1_0_4, 3_5, 1_1_9, 1_0_4, 1_2_3, 1_1_9, 4_9, 3_5, 1] # fmt: on A__ = tokenizer(lowercase_,text_target=lowercase_ ) self.assertEqual(lowercase_,batch['input_ids'][0] ) self.assertEqual(lowercase_,batch['labels'][0] ) def snake_case__ ( self : Optional[int] )-> Optional[Any]: '''simple docstring''' A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): self.assertNotEqual(tokenizer.model_max_length,4_2 ) # Now let's start the test A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc A__ = tempfile.mkdtemp() A__ = ' He is very happy, UNwant\u00E9d,running' A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) tokenizer.save_pretrained(lowercase_ ) A__ = tokenizer.__class__.from_pretrained(lowercase_ ) A__ = after_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) shutil.rmtree(lowercase_ ) A__ = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): # Isolate this from the other tests because we save additional tokens/etc A__ = tempfile.mkdtemp() A__ = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) A__ = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) tokenizer.save_pretrained(lowercase_ ) A__ = tokenizer.__class__.from_pretrained(lowercase_ ) A__ = after_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) self.assertIn('new_additional_special_token',after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length,4_2 ) A__ = tokenizer.__class__.from_pretrained(lowercase_,model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length,4_3 ) shutil.rmtree(lowercase_ ) def snake_case__ ( self : Tuple )-> Union[str, Any]: '''simple docstring''' A__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase_ ) with open(os.path.join(lowercase_,'special_tokens_map.json' ),encoding='utf-8' ) as json_file: A__ = json.load(lowercase_ ) with open(os.path.join(lowercase_,'tokenizer_config.json' ),encoding='utf-8' ) as json_file: A__ = json.load(lowercase_ ) A__ = [F'<extra_id_{i}>' for i in range(1_2_5 )] A__ = added_tokens_extra_ids + [ 'an_additional_special_token' ] A__ = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(lowercase_,'special_tokens_map.json' ),'w',encoding='utf-8' ) as outfile: json.dump(lowercase_,lowercase_ ) with 
open(os.path.join(lowercase_,'tokenizer_config.json' ),'w',encoding='utf-8' ) as outfile: json.dump(lowercase_,lowercase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files A__ = tokenizer_class.from_pretrained( lowercase_,) self.assertIn( 'an_additional_special_token',tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'],tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ),) # Now we test that we can change the value of additional_special_tokens in the from_pretrained A__ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token',lstrip=lowercase_ )] A__ = tokenizer_class.from_pretrained( lowercase_,additional_special_tokens=lowercase_,) self.assertIn('a_new_additional_special_token',tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'],tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ),) def snake_case__ ( self : List[str] )-> Any: '''simple docstring''' A__ = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowercase_ ) A__ = tokenizer_class.from_pretrained(lowercase_ ) self.assertTrue(tokenizer.decode([2_5_5] ) == '' ) def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' pass def snake_case__ ( self : Optional[Any] )-> List[str]: '''simple docstring''' pass def snake_case__ ( self : List[str] )-> Any: '''simple docstring''' pass def snake_case__ ( self : Dict )-> Tuple: '''simple docstring''' pass def snake_case__ ( self : Optional[Any] )-> Optional[Any]: '''simple docstring''' A__ = self.get_tokenizers(fast=lowercase_,do_lower_case=lowercase_ ) for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] A__ = tokenizer.convert_tokens_to_string(lowercase_ ) self.assertIsInstance(lowercase_,lowercase_ ) def snake_case__ ( self : Tuple )-> str: '''simple docstring''' A__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'{tokenizer.__class__.__name__}' ): A__ = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] A__ = 0 A__ = tokenizer.convert_ids_to_tokens( lowercase_,skip_special_tokens=lowercase_ ) for attr in attributes_list: setattr(lowercase_,attr + '_id',lowercase_ ) self.assertEqual(getattr(lowercase_,lowercase_ ),lowercase_ ) self.assertEqual(getattr(lowercase_,attr + '_id' ),lowercase_ ) setattr(lowercase_,attr + '_id',lowercase_ ) self.assertEqual(getattr(lowercase_,lowercase_ ),lowercase_ ) self.assertEqual(getattr(lowercase_,attr + '_id' ),lowercase_ ) setattr(lowercase_,'additional_special_tokens_ids',[] ) self.assertListEqual(getattr(lowercase_,'additional_special_tokens' ),[] ) 
self.assertListEqual(getattr(lowercase_,'additional_special_tokens_ids' ),[] ) setattr(lowercase_,'additional_special_tokens_ids',[token_id_to_test_setters] ) self.assertListEqual(getattr(lowercase_,'additional_special_tokens' ),[token_to_test_setters] ) self.assertListEqual(getattr(lowercase_,'additional_special_tokens_ids' ),[token_id_to_test_setters] )
7
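# The id scheme exercised by the ByT5 tests above can be reproduced by hand:
# each UTF-8 byte maps to byte value + 3 (ids 0/1/2 are reserved for the
# pad/eos/unk specials) and eos (1) is appended. The function name is invented.
def byte_level_ids(text, eos_id=1, offset=3):
    return [b + offset for b in text.encode("utf-8")] + [eos_id]

print(byte_level_ids("Unicode €."))
# [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
# -- the same ids the test asserts for 'Unicode €.'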
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
7
1
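# The '▁' (U+2581) prefix on the Albert tokens above is the SentencePiece
# word-boundary marker; detokenization is essentially replacing it with a
# space. A minimal sketch of that convention:
tokens = ["▁this", "▁is", "▁a", "▁test"]
text = "".join(tokens).replace("\u2581", " ").strip()
print(text)  # 'this is a test'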
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'switch_transformers'
    lowerCamelCase = ['past_key_values']
    lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__( self : Dict,lowercase_ : Optional[int]=3_2_1_2_8,lowercase_ : List[Any]=7_6_8,lowercase_ : Union[str, Any]=6_4,lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Dict=6_4,lowercase_ : List[Any]=1_2,lowercase_ : Optional[int]=3,lowercase_ : List[Any]=1_2,lowercase_ : Dict=3,lowercase_ : Any=1_2,lowercase_ : Optional[int]=8,lowercase_ : str=False,lowercase_ : Dict=0.01,lowercase_ : Optional[Any]="float32",lowercase_ : Any=False,lowercase_ : str=3_2,lowercase_ : List[Any]=1_2_8,lowercase_ : int=0.1,lowercase_ : Union[str, Any]=1E-6,lowercase_ : Dict=0.001,lowercase_ : List[Any]=0.001,lowercase_ : Dict=1.0,lowercase_ : Optional[int]="relu",lowercase_ : Dict=True,lowercase_ : Union[str, Any]=False,lowercase_ : Union[str, Any]=True,lowercase_ : List[str]=0,lowercase_ : int=1,**lowercase_ : Union[str, Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = d_model
        A__ = d_kv
        A__ = d_ff
        A__ = num_sparse_encoder_layers
        A__ = num_layers
        A__ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        A__ = num_sparse_decoder_layers
        # This tells us after how many layers an encoder layer has to be a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            A__ = self.num_layers // self.num_sparse_encoder_layers
        else:
            A__ = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us after how many layers a decoder layer has to be a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            A__ = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            A__ = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        A__ = num_heads
        A__ = num_experts
        A__ = expert_capacity
        A__ = router_bias
        A__ = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        A__ = router_dtype
        A__ = router_ignore_padding_tokens
        A__ = relative_attention_num_buckets
        A__ = relative_attention_max_distance
        A__ = dropout_rate
        A__ = layer_norm_epsilon
        A__ = initializer_factor
        A__ = feed_forward_proj
        A__ = use_cache
        A__ = add_router_probs
        A__ = router_z_loss_coef
        A__ = router_aux_loss_coef
        A__ = self.feed_forward_proj.split('-' )
        A__ = act_info[-1]
        A__ = act_info[0] == 'gated'
        if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2:
            raise ValueError(
                F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                ' Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            A__ = 'gelu_new'
        super().__init__(
            pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,)
7
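# The sparse-layer spacing computed in the Switch Transformers config above,
# worked through with its defaults (12 layers, 3 sparse layers): the step is
# 12 // 3 = 4. Which concrete layer indices become mixture-of-experts layers
# is decided by the modeling code; the listing below is one natural reading.
num_layers, num_sparse_layers = 12, 3
sparse_step = num_layers // num_sparse_layers if num_sparse_layers > 0 else num_layers
print(sparse_step)  # 4
print([i for i in range(1, num_layers + 1) if i % sparse_step == 0])  # [4, 8, 12]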
from typing import Dict

from .base import GenericTensor, Pipeline


class A ( _UpperCAmelCase ):
    """simple docstring"""

    def snake_case__ ( self : int,lowercase_ : Dict=None,lowercase_ : Tuple=None,lowercase_ : List[Any]=None,**lowercase_ : Any )-> Optional[Any]:
        '''simple docstring'''
        if tokenize_kwargs is None:
            A__ = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            A__ = truncation
        A__ = tokenize_kwargs
        A__ = {}
        if return_tensors is not None:
            A__ = return_tensors
        return preprocess_params, {}, postprocess_params

    def snake_case__ ( self : Dict,lowercase_ : List[Any],**lowercase_ : Tuple )-> Dict[str, GenericTensor]:
        '''simple docstring'''
        A__ = self.framework
        A__ = self.tokenizer(lowercase_,return_tensors=lowercase_,**lowercase_ )
        return model_inputs

    def snake_case__ ( self : Tuple,lowercase_ : int )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model(**lowercase_ )
        return model_outputs

    def snake_case__ ( self : Tuple,lowercase_ : Tuple,lowercase_ : List[str]=False )-> Any:
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self : List[Any],*lowercase_ : int,**lowercase_ : Optional[Any] )-> int:
        '''simple docstring'''
        return super().__call__(*lowercase_,**lowercase_ )
7
1
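# The parameter-sanitization pattern from the feature-extraction pipeline
# above, isolated: a setting may arrive directly or inside tokenize_kwargs,
# but not both. The function name below is invented for the sketch.
def sanitize(truncation=None, tokenize_kwargs=None):
    tokenize_kwargs = dict(tokenize_kwargs or {})
    if truncation is not None:
        if "truncation" in tokenize_kwargs:
            raise ValueError(
                "truncation parameter defined twice (given as keyword argument "
                "as well as in tokenize_kwargs)"
            )
        tokenize_kwargs["truncation"] = truncation
    return tokenize_kwargs

print(sanitize(truncation=True))                       # {'truncation': True}
print(sanitize(tokenize_kwargs={"truncation": True}))  # {'truncation': True}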
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


lowercase_ = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
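# A tiny sketch of the lazy-import idea behind _LazyModule above, using the
# module-level __getattr__ hook from PEP 562: the real import only runs the
# first time an attribute is touched. Save as lazy_mod.py and use it via
# `import lazy_mod; lazy_mod.dumps({"a": 1})`. _LazyModule itself keeps the
# same kind of name -> submodule map but with more bookkeeping.
import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):  # only called for names not found the normal way
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")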
from timeit import timeit


def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    A__ = 0
    while number:
        number &= number - 1
        result += 1
    return result


def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int:
    '''simple docstring'''
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    A__ = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def _snake_case( ) -> None:
    '''simple docstring'''

    def do_benchmark(SCREAMING_SNAKE_CASE__ : int ) -> None:
        A__ = 'import __main__ as z'
        print(f'Benchmark when {number = }:' )
        print(f'{get_set_bits_count_using_modulo_operator(SCREAMING_SNAKE_CASE__ ) = }' )
        A__ = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=SCREAMING_SNAKE_CASE__ )
        print(f'timeit() runs in {timing} seconds' )
        print(f'{get_set_bits_count_using_brian_kernighans_algorithm(SCREAMING_SNAKE_CASE__ ) = }' )
        A__ = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=SCREAMING_SNAKE_CASE__ , )
        print(f'timeit() runs in {timing} seconds' )

    for number in (25, 37, 58, 0):
        do_benchmark(SCREAMING_SNAKE_CASE__ )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
7
1
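# Why the `number &= number - 1` loop above counts set bits: each iteration
# clears exactly the lowest set bit, so the loop body runs once per 1-bit.
# Tracing 25 (0b11001):
n, steps = 25, []
while n:
    steps.append(bin(n))
    n &= n - 1
print(steps)       # ['0b11001', '0b11000', '0b10000']
print(len(steps))  # 3 -- the population count of 25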
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> int:
    '''simple docstring'''
    assert x is not None
    assert y is not None
    A__ = len(SCREAMING_SNAKE_CASE__ )
    A__ = len(SCREAMING_SNAKE_CASE__ )
    # declaring the array for storing the dp values
    A__ = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            A__ = 1 if x[i - 1] == y[j - 1] else 0
            A__ = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    A__ = ''
    A__ , A__ = m, n
    while i > 0 and j > 0:
        A__ = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                A__ = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    lowercase_ = "AGGTAB"
    lowercase_ = "GXTXAYB"
    lowercase_ = 4
    lowercase_ = "GTAB"

    lowercase_ , lowercase_ = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
7
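# Quick check of the LCS routine above against its own __main__ example: the
# longest common subsequence of 'AGGTAB' and 'GXTXAYB' is 'GTAB', length 4.
# Minimal recomputation of just the length table with the textbook recurrence:
x, y = "AGGTAB", "GXTXAYB"
m, n = len(x), len(y)
table = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
    for j in range(1, n + 1):
        if x[i - 1] == y[j - 1]:
            table[i][j] = table[i - 1][j - 1] + 1
        else:
            table[i][j] = max(table[i - 1][j], table[i][j - 1])
print(table[m][n])  # 4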
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int:
    '''simple docstring'''
    A__ = 384
    A__ = 7
    if "tiny" in model_name:
        A__ = 96
        A__ = (2, 2, 6, 2)
        A__ = (3, 6, 12, 24)
    elif "small" in model_name:
        A__ = 96
        A__ = (2, 2, 18, 2)
        A__ = (3, 6, 12, 24)
    elif "base" in model_name:
        A__ = 128
        A__ = (2, 2, 18, 2)
        A__ = (4, 8, 16, 32)
        A__ = 12
        A__ = 512
    elif "large" in model_name:
        A__ = 192
        A__ = (2, 2, 18, 2)
        A__ = (6, 12, 24, 48)
        A__ = 12
        A__ = 768

    # set label information
    A__ = 150
    A__ = 'huggingface/label-files'
    A__ = 'ade20k-id2label.json'
    A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
    A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
    A__ = {v: k for k, v in idalabel.items()}

    A__ = SwinConfig(
        embed_dim=SCREAMING_SNAKE_CASE__ ,
        depths=SCREAMING_SNAKE_CASE__ ,
        num_heads=SCREAMING_SNAKE_CASE__ ,
        window_size=SCREAMING_SNAKE_CASE__ ,
        out_features=['stage1', 'stage2', 'stage3', 'stage4'] ,
    )
    A__ = UperNetConfig(
        backbone_config=SCREAMING_SNAKE_CASE__ ,
        auxiliary_in_channels=SCREAMING_SNAKE_CASE__ ,
        num_labels=SCREAMING_SNAKE_CASE__ ,
        idalabel=SCREAMING_SNAKE_CASE__ ,
        labelaid=SCREAMING_SNAKE_CASE__ ,
    )

    return config


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
    '''simple docstring'''
    A__ = []

    # fmt: off
    # stem
    rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') )
    rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') )
    rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
            rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )

        if i < 3:
            rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
            rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
        rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
        rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )

    # decode head
    rename_keys.extend(
        [
            ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'),
            ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'),
            ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'),
            ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'),
        ]
    )
    # fmt: on

    return rename_keys


def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]:
    '''simple docstring'''
    A__ = dct.pop(SCREAMING_SNAKE_CASE__ )
    A__ = val


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
    '''simple docstring'''
    A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        A__ = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
            A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
            # next, add query, keys and values (in that order) to the state dict
            A__ = in_proj_weight[:dim, :]
            A__ = in_proj_bias[: dim]
            A__ = in_proj_weight[
                dim : dim * 2, :
            ]
            A__ = in_proj_bias[
                dim : dim * 2
            ]
            A__ = in_proj_weight[
                -dim :, :
            ]
            A__ = in_proj_bias[-dim :]
            # fmt: on


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]:
    '''simple docstring'''
    A__ , A__ = x.shape
    A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 )
    A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return x


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]:
    '''simple docstring'''
    A__ , A__ = x.shape
    A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 )
    A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return x


def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
    '''simple docstring'''
    A__ = x.shape[0]
    A__ = x.reshape(4 , in_channel // 4 )
    A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
    return x


def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]:
    '''simple docstring'''
    A__ = x.shape[0]
    A__ = x.reshape(in_channel // 4 , 4 )
    A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ )
    return x


def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, Any]:
    '''simple docstring'''
    A__ = {
        'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth',
        'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth',
        'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth',
        'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth',
    }
    A__ = model_name_to_url[model_name]
    A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[
        'state_dict'
    ]

    for name, param in state_dict.items():
        print(SCREAMING_SNAKE_CASE__ , param.shape )

    A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ )
    A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ )
        if "bn" in key:
            A__ = key.replace('bn' , 'batch_norm' )
        A__ = val

    # rename keys
    A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ )
    for src, dest in rename_keys:
        rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config )

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ )
            if "norm" in key:
                A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ )

    model.load_state_dict(SCREAMING_SNAKE_CASE__ )

    # verify on image
    A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'
    A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' )
    A__ = SegformerImageProcessor()
    A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values

    with torch.no_grad():
        A__ = model(SCREAMING_SNAKE_CASE__ )
        A__ = outputs.logits

    print(logits.shape )
    print('First values of logits:' , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        A__ = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        A__ = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        A__ = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        A__ = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print('Logits:' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
    print('Looks ok!' )

    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(SCREAMING_SNAKE_CASE__ )
        print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(SCREAMING_SNAKE_CASE__ )

    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub' )
        model.push_to_hub(f'openmmlab/{model_name}' )
        processor.push_to_hub(f'openmmlab/{model_name}' )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    lowercase_ = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'pix2struct_text_model'
    lowerCamelCase = ['past_key_values']
    lowerCamelCase = {
        'hidden_size': 'hidden_size',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self : List[Any],lowercase_ : Union[str, Any]=5_0_2_4_4,lowercase_ : Optional[int]=7_6_8,lowercase_ : Tuple=6_4,lowercase_ : Optional[Any]=2_0_4_8,lowercase_ : Tuple=1_2,lowercase_ : List[Any]=1_2,lowercase_ : Union[str, Any]=3_2,lowercase_ : Any=1_2_8,lowercase_ : Optional[int]=0.1,lowercase_ : Tuple=1E-6,lowercase_ : List[str]=1.0,lowercase_ : str="gelu_new",lowercase_ : int=0,lowercase_ : Union[str, Any]=False,lowercase_ : Tuple=0,lowercase_ : Optional[Any]=1,lowercase_ : List[Any]=False,lowercase_ : Any=True,**lowercase_ : Dict,)-> int:
        '''simple docstring'''
        A__ = vocab_size
        A__ = hidden_size
        A__ = d_kv
        A__ = d_ff
        A__ = num_layers
        A__ = num_heads
        A__ = relative_attention_num_buckets
        A__ = relative_attention_max_distance
        A__ = dropout_rate
        A__ = layer_norm_epsilon
        A__ = initializer_factor
        A__ = use_cache
        A__ = eos_token_id
        A__ = decoder_start_token_id

        # for backwards compatibility
        A__ = dense_act_fn

        super().__init__(
            pad_token_id=lowercase_,eos_token_id=lowercase_,decoder_start_token_id=lowercase_,tie_word_embeddings=lowercase_,is_decoder=lowercase_,**lowercase_,)

    @classmethod
    def snake_case__ ( cls : str,lowercase_ : Union[str, os.PathLike],**lowercase_ : int )-> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(lowercase_ )

        A__ , A__ = cls.get_config_dict(lowercase_,**lowercase_ )

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            A__ = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(lowercase_,**lowercase_ )


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'pix2struct_vision_model'

    def __init__( self : int,lowercase_ : str=7_6_8,lowercase_ : Optional[Any]=7_6_8,lowercase_ : Dict=2_0_4_8,lowercase_ : List[Any]=6_4,lowercase_ : int=1_2,lowercase_ : List[str]=1_2,lowercase_ : List[Any]="gelu_new",lowercase_ : List[str]=1E-6,lowercase_ : Optional[Any]=0.0,lowercase_ : Optional[Any]=0.0,lowercase_ : Union[str, Any]=1E-10,lowercase_ : Optional[Any]=1.0,lowercase_ : List[Any]=4_0_9_6,lowercase_ : Union[str, Any]=3_2,lowercase_ : Dict=1_2_8,**lowercase_ : Dict,)-> List[Any]:
        '''simple docstring'''
        super().__init__(**lowercase_ )

        A__ = hidden_size
        A__ = patch_embed_hidden_size
        A__ = d_ff
        A__ = dropout_rate
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = initializer_range
        A__ = initializer_factor
        A__ = attention_dropout
        A__ = layer_norm_eps
        A__ = dense_act_fn
        A__ = seq_len
        A__ = relative_attention_num_buckets
        A__ = relative_attention_max_distance
        A__ = d_kv

    @classmethod
    def snake_case__ ( cls : List[str],lowercase_ : Union[str, os.PathLike],**lowercase_ : Dict )-> "PretrainedConfig":
        '''simple docstring'''
        cls._set_token_in_kwargs(lowercase_ )

        A__ , A__ = cls.get_config_dict(lowercase_,**lowercase_ )

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type' ) == "pix2struct":
            A__ = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls,'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(lowercase_,**lowercase_ )


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'pix2struct'
    lowerCamelCase = True

    def __init__( self : Any,lowercase_ : Tuple=None,lowercase_ : List[str]=None,lowercase_ : Optional[int]=1.0,lowercase_ : Optional[int]=0.02,lowercase_ : Tuple=False,lowercase_ : int=False,lowercase_ : Any=True,**lowercase_ : Tuple,)-> Any:
        '''simple docstring'''
        super().__init__(tie_word_embeddings=lowercase_,is_encoder_decoder=lowercase_,**lowercase_ )

        if text_config is None:
            A__ = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )

        if vision_config is None:
            A__ = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )

        A__ = PixaStructTextConfig(**lowercase_ )
        A__ = PixaStructVisionConfig(**lowercase_ )

        A__ = self.text_config.decoder_start_token_id
        A__ = self.text_config.pad_token_id
        A__ = self.text_config.eos_token_id

        A__ = initializer_factor
        A__ = initializer_range
        A__ = self.initializer_range
        A__ = self.initializer_range
        A__ = is_vqa

    @classmethod
    def snake_case__ ( cls : List[Any],lowercase_ : PixaStructTextConfig,lowercase_ : PixaStructVisionConfig,**lowercase_ : int )-> Dict:
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(),vision_config=vision_config.to_dict(),**lowercase_ )

    def snake_case__ ( self : Tuple )-> Any:
        '''simple docstring'''
        A__ = copy.deepcopy(self.__dict__ )
        A__ = self.text_config.to_dict()
        A__ = self.vision_config.to_dict()
        A__ = self.__class__.model_type
        return output
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed


lowercase_ = "true"


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]:
    '''simple docstring'''
    set_seed(42 )
    A__ = RegressionModel()
    A__ = deepcopy(SCREAMING_SNAKE_CASE__ )
    A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
    A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
    model.to(accelerator.device )
    A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return model, ddp_model, dataloader


def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int:
    '''simple docstring'''
    A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    A__ = load_dataset('glue' , 'mrpc' , split='validation' )

    def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ):
        A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
        return outputs

    with accelerator.main_process_first():
        A__ = dataset.map(
            SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )

    A__ = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ):
        if use_longest:
            return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' )

    return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 )


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str:
    '''simple docstring'''
    A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ )
    A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches )
    A__ = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
    A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
    '''simple docstring'''
    A__ = []
    for batch in dataloader:
        A__ , A__ = batch.values()
        with torch.no_grad():
            A__ = model(SCREAMING_SNAKE_CASE__ )
        A__ , A__ = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    A__ , A__ = [], []
    for logit, targ in logits_and_targets:
        logits.append(SCREAMING_SNAKE_CASE__ )
        targs.append(SCREAMING_SNAKE_CASE__ )
    A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ )
    return logits, targs


def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]:
    '''simple docstring'''
    A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert (
        len(SCREAMING_SNAKE_CASE__ ) == num_samples
    ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}'


def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str:
    '''simple docstring'''
    A__ = evaluate.load('glue' , 'mrpc' )
    A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # First do baseline
    A__ , A__ , A__ = setup['no']
    model.to(SCREAMING_SNAKE_CASE__ )
    model.eval()
    for batch in dataloader:
        batch.to(SCREAMING_SNAKE_CASE__ )
        with torch.inference_mode():
            A__ = model(**SCREAMING_SNAKE_CASE__ )
        A__ = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] )
    A__ = metric.compute()

    # Then do distributed
    A__ , A__ , A__ = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            A__ = model(**SCREAMING_SNAKE_CASE__ )
        A__ = outputs.logits.argmax(dim=-1 )
        A__ = batch['labels']
        A__ , A__ = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ )
    A__ = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'


def _snake_case( ) -> Optional[Any]:
    '''simple docstring'''
    A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
                test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
            test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    A__ = Accelerator()
    test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 )
    accelerator.state._reset_state()


def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
    '''simple docstring'''
    main()


if __name__ == "__main__":
    main()
class A :
    """simple docstring"""

    def __init__( self : Any,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : List[str] )-> List[Any]:
        '''simple docstring'''
        A__ = name
        A__ = value
        A__ = weight

    def __repr__( self : int )-> Tuple:
        '''simple docstring'''
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'

    def snake_case__ ( self : Any )-> str:
        '''simple docstring'''
        return self.value

    def snake_case__ ( self : Any )-> Tuple:
        '''simple docstring'''
        return self.name

    def snake_case__ ( self : Any )-> Dict:
        '''simple docstring'''
        return self.weight

    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        return self.value / self.weight


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
    '''simple docstring'''
    A__ = []
    for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Any:
    '''simple docstring'''
    A__ = sorted(SCREAMING_SNAKE_CASE__ , key=SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ )
    A__ = []
    A__ , A__ = 0.0, 0.0
    for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def _snake_case( ) -> Any:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
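A self-contained sketch of the same greedy-by-density selection, under the assumption that the sort key above is the value/weight ratio; all names below are illustrative, since the original identifiers in this file are obfuscated:

# Pick items by descending value/weight ratio until the weight budget is exhausted.
items = [("Burger", 80, 40), ("Pizza", 100, 60), ("Coca Cola", 60, 40)]  # (name, value, weight)
budget = 60
chosen, total_value, total_weight = [], 0.0, 0.0
for name, value, weight in sorted(items, key=lambda t: t[1] / t[2], reverse=True):
    if total_weight + weight <= budget:
        chosen.append(name)
        total_weight += weight
        total_value += value
print(chosen, total_value)  # ['Burger'] 80.0 -- Pizza (weight 60) no longer fits after Burger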
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple:
    '''simple docstring'''
    A__ = 0
    A__ = len(SCREAMING_SNAKE_CASE__ ) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
            return None

        A__ = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                A__ = left
                A__ = point
            elif point > right:
                A__ = right
                A__ = point
            else:
                if item < current_item:
                    A__ = point - 1
                else:
                    A__ = point + 1
    return None


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str:
    '''simple docstring'''
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    A__ = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(SCREAMING_SNAKE_CASE__ ):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    elif point > right:
        return interpolation_search_by_recursion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point - 1 )
        else:
            return interpolation_search_by_recursion(
                SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , point + 1 , SCREAMING_SNAKE_CASE__ )


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
    '''simple docstring'''
    if collection != sorted(SCREAMING_SNAKE_CASE__ ):
        raise ValueError('Collection must be ascending sorted' )
    return True


if __name__ == "__main__":
    import sys

    lowercase_ = 0
    if debug == 1:
        lowercase_ = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    lowercase_ = 67
    lowercase_ = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
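A worked example of the probe formula both functions above share: the probe position is a linear estimate of where the item should sit between the endpoint values, so on near-uniform data it lands close to the target on the first try.

arr = [10, 30, 40, 45, 50, 66, 77, 93]
item = 66
left, right = 0, len(arr) - 1
point = left + ((item - arr[left]) * (right - left)) // (arr[right] - arr[left])
print(point, arr[point])  # 4 50 -- one probe lands next to index 5, where 66 actually is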
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
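A minimal usage sketch for the pipeline exported here; the checkpoint name, dtype, and device are assumptions and not part of this module:

import torch
from diffusers import UnCLIPPipeline

# Assumes the kakaobrain/karlo-v1-alpha checkpoint and an available CUDA device.
pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")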
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
    '''simple docstring'''
    return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def _snake_case( ) -> Dict:
    '''simple docstring'''
    A__ = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=SCREAMING_SNAKE_CASE__ )
    A__ = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )

    # Parse args
    A__ , A__ = parser.parse_known_args()
    if not hasattr(SCREAMING_SNAKE_CASE__ , 'func' ):
        parser.print_help()
        exit(1 )
    A__ = parse_unknown_args(SCREAMING_SNAKE_CASE__ )

    # Run
    A__ = args.func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    service.run()


if __name__ == "__main__":
    main()
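The flag parser above simply pairs alternating tokens, so extra CLI arguments round-trip like this (same comprehension as in the function, shown standalone):

unknown_args = ["--name", "squad", "--all_configs", "True"]
print({key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])})
# {'name': 'squad', 'all_configs': 'True'}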
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class A ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[int],lowercase_ : str,lowercase_ : Optional[Any]=7,lowercase_ : List[str]=3,lowercase_ : Optional[Any]=1_8,lowercase_ : int=3_0,lowercase_ : List[Any]=4_0_0,lowercase_ : str=True,lowercase_ : List[str]=None,lowercase_ : str=True,lowercase_ : Optional[int]=None,lowercase_ : List[Any]=True,)-> Optional[Any]:
        '''simple docstring'''
        A__ = size if size is not None else {'shortest_edge': 2_0}
        A__ = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
        A__ = parent
        A__ = batch_size
        A__ = num_channels
        A__ = image_size
        A__ = min_resolution
        A__ = max_resolution
        A__ = do_resize
        A__ = size
        A__ = do_center_crop
        A__ = crop_size
        A__ = do_flip_channel_order

    def snake_case__ ( self : Optional[Any] )-> Dict:
        '''simple docstring'''
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class A ( _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = MobileViTImageProcessor if is_vision_available() else None

    def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
        '''simple docstring'''
        A__ = MobileViTImageProcessingTester(self )

    @property
    def snake_case__ ( self : int )-> str:
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case__ ( self : Optional[Any] )-> List[str]:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase_,'do_resize' ) )
        self.assertTrue(hasattr(lowercase_,'size' ) )
        self.assertTrue(hasattr(lowercase_,'do_center_crop' ) )
        self.assertTrue(hasattr(lowercase_,'center_crop' ) )
        self.assertTrue(hasattr(lowercase_,'do_flip_channel_order' ) )

    def snake_case__ ( self : Any )-> List[str]:
        '''simple docstring'''
        A__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size,{'shortest_edge': 2_0} )
        self.assertEqual(image_processor.crop_size,{'height': 1_8, 'width': 1_8} )

        A__ = self.image_processing_class.from_dict(self.image_processor_dict,size=4_2,crop_size=8_4 )
        self.assertEqual(image_processor.size,{'shortest_edge': 4_2} )
        self.assertEqual(image_processor.crop_size,{'height': 8_4, 'width': 8_4} )

    def snake_case__ ( self : int )-> List[str]:
        '''simple docstring'''
        pass

    def snake_case__ ( self : Union[str, Any] )-> int:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_,Image.Image )

        # Test not batched input
        A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)

        # Test batched
        A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)

    def snake_case__ ( self : Optional[Any] )-> Any:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_,numpify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_,np.ndarray )

        # Test not batched input
        A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)

        # Test batched
        A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)

    def snake_case__ ( self : List[str] )-> Tuple:
        '''simple docstring'''
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=lowercase_,torchify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_,torch.Tensor )

        # Test not batched input
        A__ = image_processing(image_inputs[0],return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)

        # Test batched
        A__ = image_processing(lowercase_,return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),)
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class A :
    """simple docstring"""

    def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        A__ = (image_size // patch_size) ** 2
        A__ = num_patches + 1

    def snake_case__ ( self : int )-> List[str]:
        '''simple docstring'''
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )

        A__ = self.get_config()

        return config, pixel_values, labels

    def snake_case__ ( self : Tuple )-> List[Any]:
        '''simple docstring'''
        return ViTConfig(
            image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,)

    def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]:
        '''simple docstring'''
        A__ = TFViTModel(config=lowercase_ )
        A__ = model(lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
        A__ = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )

    def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict:
        '''simple docstring'''
        A__ = self.type_sequence_label_size
        A__ = TFViTForImageClassification(lowercase_ )
        A__ = model(lowercase_,labels=lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        A__ = 1
        A__ = TFViTForImageClassification(lowercase_ )
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

    def snake_case__ ( self : Any )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ = config_and_inputs
        A__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    lowerCamelCase = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__ ( self : int )-> List[Any]:
        '''simple docstring'''
        A__ = TFViTModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 )

    def snake_case__ ( self : Any )-> Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def snake_case__ ( self : Optional[Any] )-> str:
        '''simple docstring'''
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def snake_case__ ( self : Any )-> int:
        '''simple docstring'''
        pass

    def snake_case__ ( self : str )-> Dict:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) )

    def snake_case__ ( self : int )-> List[str]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            A__ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]

            A__ = ['pixel_values']
            self.assertListEqual(arg_names[:1],lowercase_ )

    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    @slow
    def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
        '''simple docstring'''
        A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(lowercase_ )


def _snake_case( ) -> str:
    '''simple docstring'''
    A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
@require_vision
class A ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def snake_case__ ( self : List[Any] )-> str:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def snake_case__ ( self : Any )-> Dict:
        '''simple docstring'''
        A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )

        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=lowercase_,return_tensors='tf' )

        # forward pass
        A__ = model(**lowercase_ )

        # verify the logits
        A__ = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape,lowercase_ )

        A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )

        tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
from __future__ import annotations

from collections.abc import Callable


lowercase_ = list[list[float | int]]


def _snake_case( SCREAMING_SNAKE_CASE__ : Matrix , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix:
    '''simple docstring'''
    A__ = len(SCREAMING_SNAKE_CASE__ )
    A__ = [[0 for _ in range(size + 1 )] for _ in range(SCREAMING_SNAKE_CASE__ )]
    A__ = 42
    A__ = 42
    A__ = 42
    A__ = 42
    A__ = 42
    A__ = 42

    for row in range(SCREAMING_SNAKE_CASE__ ):
        for col in range(SCREAMING_SNAKE_CASE__ ):
            A__ = matrix[row][col]
        A__ = vector[row][0]

    A__ = 0
    A__ = 0
    while row < size and col < size:
        # pivoting
        A__ = max((abs(augmented[rowa][col] ), rowa) for rowa in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            A__ , A__ = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1 , SCREAMING_SNAKE_CASE__ ):
            A__ = augmented[rowa][col] / augmented[row][col]
            A__ = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1 , SCREAMING_SNAKE_CASE__ ):
        for row in range(SCREAMING_SNAKE_CASE__ ):
            A__ = augmented[row][col] / augmented[col][col]
            for cola in range(SCREAMING_SNAKE_CASE__ , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(SCREAMING_SNAKE_CASE__ )
    ]


def _snake_case( SCREAMING_SNAKE_CASE__ : list[int] ) -> Callable[[int], int]:
    '''simple docstring'''
    A__ = len(SCREAMING_SNAKE_CASE__ )
    A__ = [[0 for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
    A__ = [[0] for _ in range(SCREAMING_SNAKE_CASE__ )]
    A__ = 42
    A__ = 42
    A__ = 42
    A__ = 42

    for x_val, y_val in enumerate(SCREAMING_SNAKE_CASE__ ):
        for col in range(SCREAMING_SNAKE_CASE__ ):
            A__ = (x_val + 1) ** (size - col - 1)
        A__ = y_val

    A__ = solve(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )

    def interpolated_func(SCREAMING_SNAKE_CASE__ : int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(SCREAMING_SNAKE_CASE__ )
        )

    return interpolated_func


def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int:
    '''simple docstring'''
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def _snake_case( SCREAMING_SNAKE_CASE__ : Callable[[int], int] = question_function , SCREAMING_SNAKE_CASE__ : int = 10 ) -> int:
    '''simple docstring'''
    A__ = [func(SCREAMING_SNAKE_CASE__ ) for x_val in range(1 , order + 1 )]
    A__ = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    A__ = 0
    A__ = 42
    A__ = 42

    for poly in polynomials:
        A__ = 1
        while func(SCREAMING_SNAKE_CASE__ ) == poly(SCREAMING_SNAKE_CASE__ ):
            x_val += 1

        ret += poly(SCREAMING_SNAKE_CASE__ )

    return ret


if __name__ == "__main__":
    print(f"""{solution() = }""")
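A minimal exact-arithmetic sketch of the same idea (Project Euler 101): fit a polynomial through the first k terms, then the first disagreement with the true sequence is the "first incorrect term" (FIT). The helper name fit_poly is illustrative; the file above reaches the same fit via integer Gaussian elimination instead of Lagrange interpolation.

from fractions import Fraction


def fit_poly(ys):
    # Lagrange interpolation through (1, ys[0]), ..., (k, ys[k-1]), evaluated exactly.
    k = len(ys)

    def p(x):
        total = Fraction(0)
        for i in range(k):
            term = Fraction(ys[i])
            for j in range(k):
                if i != j:
                    term *= Fraction(x - (j + 1), (i + 1) - (j + 1))
            total += term
        return total

    return p


cube = lambda n: n**3
p2 = fit_poly([cube(1), cube(2)])  # degree-1 fit through the first two cubes
print(p2(3))  # 15 -- the first incorrect term, since the true value is 27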
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class A :
    """simple docstring"""

    def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_input_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = num_labels
        A__ = num_choices
        A__ = scope
        A__ = vocab_size - 1

    def snake_case__ ( self : str )-> Optional[Any]:
        '''simple docstring'''
        A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )

        A__ = None
        if self.use_input_mask:
            A__ = random_attention_mask([self.batch_size, self.seq_length] )

        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )

        A__ = self.get_config()

        return config, input_ids, input_mask, token_labels

    def snake_case__ ( self : List[Any] )-> Tuple:
        '''simple docstring'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,)

    def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
        A__ = True
        return config, input_ids, input_mask, token_labels

    def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any:
        '''simple docstring'''
        A__ = GPTNeoXModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]:
        '''simple docstring'''
        A__ = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForQuestionAnswering(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )

    def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )

    def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForTokenClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()

        # first forward pass
        A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ )
        A__ = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        A__ = ids_tensor((self.batch_size, 3),config.vocab_size )
        A__ = ids_tensor((self.batch_size, 3),vocab_size=2 )

        # append to next input_ids and
        A__ = torch.cat([input_ids, next_tokens],dim=-1 )
        A__ = torch.cat([input_mask, next_mask],dim=-1 )

        A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ )
        A__ = output_from_no_past['hidden_states'][0]
        A__ = model(
            lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0]

        # select random slice
        A__ = ids_tensor((1,),output_from_past.shape[-1] ).item()
        A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        A__ = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) )

    def snake_case__ ( self : str )-> Union[str, Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ = config_and_inputs
        A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    lowerCamelCase = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__ ( self : str )-> Tuple:
        '''simple docstring'''
        A__ = GPTNeoXModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )

    def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Dict )-> List[Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : List[str] )-> Any:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        A__ = None
        self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Optional[Any] )-> str:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Dict )-> Dict:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowercase_ )

    def snake_case__ ( self : Tuple )-> List[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_ )

    def snake_case__ ( self : Any )-> List[str]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )

    def snake_case__ ( self : str )-> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_ )

    @unittest.skip(reason='Feed forward chunking is not implemented' )
    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = ids_tensor([1, 1_0],config.vocab_size )
        A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )

        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        A__ = GPTNeoXModel(lowercase_ )
        original_model.to(lowercase_ )
        original_model.eval()
        A__ = original_model(lowercase_ ).last_hidden_state
        A__ = original_model(lowercase_ ).last_hidden_state

        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        A__ = {'type': scaling_type, 'factor': 10.0}
        A__ = GPTNeoXModel(lowercase_ )
        scaled_model.to(lowercase_ )
        scaled_model.eval()
        A__ = scaled_model(lowercase_ ).last_hidden_state
        A__ = scaled_model(lowercase_ ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )


@require_torch
class A ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : Tuple )-> Union[str, Any]:
        '''simple docstring'''
        A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowercase_ )

            A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'

            A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 )
            A__ = tokenizer.batch_decode(lowercase_ )[0]

            self.assertEqual(lowercase_,lowercase_ )
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TextaTextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class A ( unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    lowerCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def snake_case__ ( self : List[Any],lowercase_ : Union[str, Any],lowercase_ : int,lowercase_ : Dict )-> Any:
        '''simple docstring'''
        A__ = TextaTextGenerationPipeline(model=lowercase_,tokenizer=lowercase_ )
        return generator, ["Something to write", "Something else"]

    def snake_case__ ( self : int,lowercase_ : List[str],lowercase_ : List[Any] )-> Optional[int]:
        '''simple docstring'''
        A__ = generator('Something there' )
        self.assertEqual(lowercase_,[{'generated_text': ANY(lowercase_ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )

        A__ = generator(['This is great !', 'Something else'],num_return_sequences=2,do_sample=lowercase_ )
        self.assertEqual(
            lowercase_,[
                [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}],
                [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}],
            ],)

        A__ = generator(
            ['This is great !', 'Something else'],num_return_sequences=2,batch_size=2,do_sample=lowercase_ )
        self.assertEqual(
            lowercase_,[
                [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}],
                [{'generated_text': ANY(lowercase_ )}, {'generated_text': ANY(lowercase_ )}],
            ],)

        with self.assertRaises(lowercase_ ):
            generator(4 )

    @require_torch
    def snake_case__ ( self : Tuple )-> List[Any]:
        '''simple docstring'''
        A__ = pipeline('text2text-generation',model='patrickvonplaten/t5-tiny-random',framework='pt' )
        # do_sample=False necessary for reproducibility
        A__ = generator('Something there',do_sample=lowercase_ )
        self.assertEqual(lowercase_,[{'generated_text': ''}] )

        A__ = 3
        A__ = generator(
            'Something there',num_return_sequences=lowercase_,num_beams=lowercase_,)
        A__ = [
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
            {'generated_text': ''},
        ]
        self.assertEqual(lowercase_,lowercase_ )

        A__ = generator('This is a test',do_sample=lowercase_,num_return_sequences=2,return_tensors=lowercase_ )
        self.assertEqual(
            lowercase_,[
                {'generated_token_ids': ANY(torch.Tensor )},
                {'generated_token_ids': ANY(torch.Tensor )},
            ],)

        A__ = generator.model.config.eos_token_id
        A__ = '<pad>'
        A__ = generator(
            ['This is a test', 'This is a second test'],do_sample=lowercase_,num_return_sequences=2,batch_size=2,return_tensors=lowercase_,)
        self.assertEqual(
            lowercase_,[
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
                [
                    {'generated_token_ids': ANY(torch.Tensor )},
                    {'generated_token_ids': ANY(torch.Tensor )},
                ],
            ],)

    @require_tf
    def snake_case__ ( self : List[str] )-> List[str]:
        '''simple docstring'''
        A__ = pipeline('text2text-generation',model='patrickvonplaten/t5-tiny-random',framework='tf' )
        # do_sample=False necessary for reproducibility
        A__ = generator('Something there',do_sample=lowercase_ )
        self.assertEqual(lowercase_,[{'generated_text': ''}] )
code_codestyle: 7
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop(
            'use_memorry_efficient_attention',lowercase_ )
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}' )
        A__ = self.rope_scaling.get('type',lowercase_ )
        A__ = self.rope_scaling.get('factor',lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
style_context_codestyle: 7
label: 1
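# Minimal standalone sketch (assumed equivalent, not taken from the row above) of
# the `rope_scaling` validation the config performs, written outside any class:
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # valid: returns None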
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowercase_ = { "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["MobileViTFeatureExtractor"] lowercase_ = ["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 7
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return EnvironmentCommand() class A ( _UpperCAmelCase ): """simple docstring""" @staticmethod def snake_case__ ( lowercase_ : ArgumentParser )-> Dict: '''simple docstring''' A__ = parser.add_parser('env' ) download_parser.set_defaults(func=lowercase_ ) def snake_case__ ( self : List[Any] )-> List[str]: '''simple docstring''' A__ = huggingface_hub.__version__ A__ = 'not installed' A__ = 'NA' if is_torch_available(): import torch A__ = torch.__version__ A__ = torch.cuda.is_available() A__ = 'not installed' if is_transformers_available(): import transformers A__ = transformers.__version__ A__ = 'not installed' if is_accelerate_available(): import accelerate A__ = accelerate.__version__ A__ = 'not installed' if is_xformers_available(): import xformers A__ = xformers.__version__ A__ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})', 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(lowercase_ ) ) return info @staticmethod def snake_case__ ( lowercase_ : int )-> Optional[Any]: '''simple docstring''' return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
style_context_codestyle: 7
label: 1
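# Hedged stand-in (simplified, not the real `_LazyModule` implementation) showing
# the deferred-import pattern the mobilevit `__init__` above relies on: a submodule
# is imported only when one of its exported names is first accessed.
import importlib
import types

class LazySubmodules(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        # triggered only on first access; imports the owning submodule lazily
        module = importlib.import_module(self._symbol_to_module[attr])
        return getattr(module, attr)

# e.g. LazySubmodules("pkg", {"json": ["dumps"]}).dumps({"a": 1}) imports json lazily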
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging lowercase_ = logging.get_logger(__name__) lowercase_ = {"vocab_file": "spiece.model"} lowercase_ = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } lowercase_ = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) lowercase_ = 0 lowercase_ = 1 lowercase_ = 2 lowercase_ = 3 lowercase_ = 4 class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = 'left' def __init__( self : Dict,lowercase_ : List[Any],lowercase_ : Dict=False,lowercase_ : List[str]=True,lowercase_ : Any=False,lowercase_ : Optional[int]="<s>",lowercase_ : List[str]="</s>",lowercase_ : List[str]="<unk>",lowercase_ : str="<sep>",lowercase_ : str="<pad>",lowercase_ : List[str]="<cls>",lowercase_ : Dict="<mask>",lowercase_ : Tuple=["<eop>", "<eod>"],lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : Optional[Any],)-> None: '''simple docstring''' A__ = AddedToken(lowercase_,lstrip=lowercase_,rstrip=lowercase_ ) if isinstance(lowercase_,lowercase_ ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase_,remove_space=lowercase_,keep_accents=lowercase_,bos_token=lowercase_,eos_token=lowercase_,unk_token=lowercase_,sep_token=lowercase_,pad_token=lowercase_,cls_token=lowercase_,mask_token=lowercase_,additional_special_tokens=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,) A__ = 3 A__ = do_lower_case A__ = remove_space A__ = keep_accents A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) @property def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' return len(self.sp_model ) def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : Optional[Any],lowercase_ : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : int,lowercase_ : List[Any] )-> List[Any]: '''simple docstring''' if self.remove_space: A__ = ' '.join(inputs.strip().split() ) else: A__ = inputs A__ = outputs.replace('``','"' ).replace('\'\'','"' ) if not self.keep_accents: A__ = unicodedata.normalize('NFKD',lowercase_ ) A__ = ''.join([c for c in outputs if not unicodedata.combining(lowercase_ )] ) if self.do_lower_case: A__ = outputs.lower() return outputs def snake_case__ ( self : Optional[Any],lowercase_ : str )-> List[str]: '''simple docstring''' A__ = self.preprocess_text(lowercase_ ) A__ = self.sp_model.encode(lowercase_,out_type=lowercase_ ) A__ = [] for piece in pieces: if len(lowercase_ ) > 1 and 
piece[-1] == str(',' ) and piece[-2].isdigit(): A__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowercase_,'' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A__ = cur_pieces[1:] else: A__ = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(lowercase_ ) else: new_pieces.append(lowercase_ ) return new_pieces def snake_case__ ( self : Dict,lowercase_ : Any )-> Optional[Any]: '''simple docstring''' return self.sp_model.PieceToId(lowercase_ ) def snake_case__ ( self : Optional[Any],lowercase_ : Any )-> Optional[int]: '''simple docstring''' return self.sp_model.IdToPiece(lowercase_ ) def snake_case__ ( self : Optional[Any],lowercase_ : str )-> Union[str, Any]: '''simple docstring''' A__ = ''.join(lowercase_ ).replace(lowercase_,' ' ).strip() return out_string def snake_case__ ( self : Union[str, Any],lowercase_ : List[int],lowercase_ : bool = False,lowercase_ : bool = None,lowercase_ : bool = True,**lowercase_ : List[str],)-> str: '''simple docstring''' A__ = kwargs.pop('use_source_tokenizer',lowercase_ ) A__ = self.convert_ids_to_tokens(lowercase_,skip_special_tokens=lowercase_ ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 A__ = [] A__ = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowercase_ ) ) A__ = [] sub_texts.append(lowercase_ ) else: current_sub_text.append(lowercase_ ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(lowercase_ ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens A__ = ''.join(lowercase_ ) A__ = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A__ = self.clean_up_tokenization(lowercase_ ) return clean_text else: return text def snake_case__ ( self : List[Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def snake_case__ ( self : str,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) + [1, 1] return ([0] * len(lowercase_ )) + [1, 1] def snake_case__ ( self : List[str],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]: '''simple docstring''' A__ = [self.sep_token_id] A__ = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def snake_case__ ( self : List[Any],lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + 
VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_,'wb' ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
code_codestyle: 7
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ReformerTokenizer lowerCamelCase = ReformerTokenizerFast lowerCamelCase = True lowerCamelCase = False lowerCamelCase = True def snake_case__ ( self : Any )-> str: '''simple docstring''' super().setUp() A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : Optional[int] )-> Optional[int]: '''simple docstring''' A__ = '<s>' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<unk>' ) self.assertEqual(vocab_keys[1],'<s>' ) self.assertEqual(vocab_keys[-1],'j' ) self.assertEqual(len(lowercase_ ),1_0_0_0 ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int,lowercase_ : Optional[int]=1_5 )-> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ ) # Simple input A__ = 'This is a simple input' A__ = ['This is a simple input 1', 'This is a simple input 2'] A__ = ('This is a simple input', 'This is a pair') A__ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) def snake_case__ ( self : 
List[Any] )-> Tuple: '''simple docstring''' pass def snake_case__ ( self : Dict )-> str: '''simple docstring''' A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ),[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2],) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ],) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4],) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ],) @cached_property def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = 'Hello World!' A__ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @slow def snake_case__ ( self : Optional[int] )-> str: '''simple docstring''' A__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) A__ = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def snake_case__ ( self : int )-> Any: '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0] A__ = ' '.join(lowercase_ ) A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' ) A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' ) A__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) A__ = encoded_sequence['input_ids'].shape A__ = ReformerModel(lowercase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def snake_case__ ( self : int )-> Tuple: '''simple docstring''' A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 A__ = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
style_context_codestyle: 7
label: 1
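# Illustration (pure-Python stand-in, no sentencepiece dependency) of the
# digit-comma re-split in the XLNet tokenizer above: a piece such as "9," is
# split so the trailing comma becomes its own token.
def resplit_digit_comma(piece: str) -> list:
    if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit():
        # the real tokenizer re-encodes piece[:-1] with the SentencePiece model
        return [piece[:-1], piece[-1]]
    return [piece]

print(resplit_digit_comma("9,"))   # ['9', ',']
print(resplit_digit_comma("ab,"))  # ['ab,']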
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase_ = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class A ( unittest.TestCase ): """simple docstring""" lowerCamelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowerCamelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowerCamelCase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def snake_case__ ( self : Any,lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : int )-> List[str]: '''simple docstring''' A__ = ZeroShotClassificationPipeline( model=lowercase_,tokenizer=lowercase_,candidate_labels=['polics', 'health'] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def snake_case__ ( self : List[str],lowercase_ : str,lowercase_ : int )-> Optional[int]: '''simple docstring''' A__ = classifier('Who are you voting for in 2020?',candidate_labels='politics' ) self.assertEqual(lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ )], 'scores': [ANY(lowercase_ )]} ) # No kwarg A__ = classifier('Who are you voting for in 2020?',['politics'] ) self.assertEqual(lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ )], 'scores': [ANY(lowercase_ )]} ) A__ = classifier('Who are you voting for in 2020?',candidate_labels=['politics'] ) self.assertEqual(lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ )], 'scores': [ANY(lowercase_ )]} ) A__ = classifier('Who are you voting for in 2020?',candidate_labels='politics, public health' ) self.assertEqual( lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ ), ANY(lowercase_ )], 'scores': [ANY(lowercase_ ), ANY(lowercase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ),1.0 ) A__ = classifier('Who are you voting for in 2020?',candidate_labels=['politics', 'public health'] ) self.assertEqual( lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ ), ANY(lowercase_ )], 'scores': [ANY(lowercase_ ), ANY(lowercase_ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ),1.0 ) A__ = classifier( 'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template='This text is about {}' ) self.assertEqual(lowercase_,{'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ )], 'scores': [ANY(lowercase_ )]} ) # https://github.com/huggingface/transformers/issues/13846 A__ = classifier(['I am happy'],['positive', 'negative'] ) self.assertEqual( lowercase_,[ {'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ ), ANY(lowercase_ )], 'scores': [ANY(lowercase_ ), ANY(lowercase_ )]} for i in range(1 ) ],) A__ = classifier(['I am happy', 'I am sad'],['positive', 'negative'] ) self.assertEqual( lowercase_,[ {'sequence': ANY(lowercase_ ), 'labels': [ANY(lowercase_ ), ANY(lowercase_ )], 'scores': [ANY(lowercase_ ), ANY(lowercase_ )]} for i in range(2 ) ],) with self.assertRaises(lowercase_ ): classifier('',candidate_labels='politics' ) with 
self.assertRaises(lowercase_ ): classifier(lowercase_,candidate_labels='politics' ) with self.assertRaises(lowercase_ ): classifier('Who are you voting for in 2020?',candidate_labels='' ) with self.assertRaises(lowercase_ ): classifier('Who are you voting for in 2020?',candidate_labels=lowercase_ ) with self.assertRaises(lowercase_ ): classifier( 'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template='Not formatting template',) with self.assertRaises(lowercase_ ): classifier( 'Who are you voting for in 2020?',candidate_labels='politics',hypothesis_template=lowercase_,) self.run_entailment_id(lowercase_ ) def snake_case__ ( self : Optional[Any],lowercase_ : Pipeline )-> List[Any]: '''simple docstring''' A__ = zero_shot_classifier.model.config A__ = config.labelaid A__ = zero_shot_classifier.entailment_id A__ = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id,-1 ) A__ = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id,0 ) A__ = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id,0 ) A__ = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id,2 ) A__ = original_labelaid self.assertEqual(lowercase_,zero_shot_classifier.entailment_id ) @require_torch def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ = pipeline( 'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='pt',) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 1_0_0,candidate_labels=['politics', 'public health', 'science'] ) @require_torch def snake_case__ ( self : str )-> int: '''simple docstring''' A__ = pipeline( 'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='pt',) A__ = zero_shot_classifier( 'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333], },) @require_tf def snake_case__ ( self : List[str] )-> Tuple: '''simple docstring''' A__ = pipeline( 'zero-shot-classification',model='sshleifer/tiny-distilbert-base-cased-distilled-squad',framework='tf',) A__ = zero_shot_classifier( 'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.333, 0.333, 0.333], },) @slow @require_torch def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = pipeline('zero-shot-classification',model='roberta-large-mnli',framework='pt' ) A__ = zero_shot_classifier( 'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009], },) A__ = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. 
The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.',candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],multi_label=lowercase_,) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018], },) @slow @require_tf def snake_case__ ( self : Any )-> int: '''simple docstring''' A__ = pipeline('zero-shot-classification',model='roberta-large-mnli',framework='tf' ) A__ = zero_shot_classifier( 'Who are you voting for in 2020?',candidate_labels=['politics', 'public health', 'science'] ) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.976, 0.015, 0.009], },) A__ = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. 
Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.',candidate_labels=['machine learning', 'statistics', 'translation', 'vision'],multi_label=lowercase_,) self.assertEqual( nested_simplify(lowercase_ ),{ 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.817, 0.713, 0.018, 0.018], },)
code_codestyle: 7
def _snake_case( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , ) -> float:
    '''simple docstring'''
    A__ = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        A__ = 1 - (matter_density + radiation_density + dark_energy)
        A__ = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        A__ = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    lowercase_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
style_context_codestyle: 7
label: 1
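# Worked check (consistent with the snippet above) of the Friedmann form
# H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3 + Ok*(1+z)^2 + OL): at z = 0 the
# densities sum to 1 in a flat universe, so H(0) reduces to H0.
omega_r, omega_m = 1e-4, 0.3
omega_l = 1 - omega_m - omega_r
omega_k = 1 - (omega_r + omega_m + omega_l)  # 0 by construction
z = 0
e_squared = omega_r * (1 + z) ** 4 + omega_m * (1 + z) ** 3 + omega_k * (1 + z) ** 2 + omega_l
print(68.3 * e_squared ** 0.5)  # ~68.3, matching the demo call in the snippet above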
from __future__ import annotations lowercase_ = list[tuple[int, int]] lowercase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A : """simple docstring""" def __init__( self : Optional[Any],lowercase_ : int,lowercase_ : int,lowercase_ : int,lowercase_ : int,lowercase_ : float,lowercase_ : Node | None,)-> Optional[int]: '''simple docstring''' A__ = pos_x A__ = pos_y A__ = (pos_y, pos_x) A__ = goal_x A__ = goal_y A__ = g_cost A__ = parent A__ = self.calculate_heuristic() def snake_case__ ( self : Dict )-> float: '''simple docstring''' A__ = abs(self.pos_x - self.goal_x ) A__ = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : Tuple,lowercase_ : Optional[Any] )-> bool: '''simple docstring''' return self.f_cost < other.f_cost class A : """simple docstring""" def __init__( self : Tuple,lowercase_ : tuple[int, int],lowercase_ : tuple[int, int] )-> Dict: '''simple docstring''' A__ = Node(start[1],start[0],goal[1],goal[0],0,lowercase_ ) A__ = Node(goal[1],goal[0],goal[1],goal[0],9_9_9_9_9,lowercase_ ) A__ = [self.start] A__ = [] A__ = False def snake_case__ ( self : Optional[int] )-> Path | None: '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A__ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: A__ = True return self.retrace_path(lowercase_ ) self.closed_nodes.append(lowercase_ ) A__ = self.get_successors(lowercase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowercase_ ) else: # retrieve the best current path A__ = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowercase_ ) else: self.open_nodes.append(lowercase_ ) if not self.reached: return [self.start.pos] return None def snake_case__ ( self : Any,lowercase_ : Node )-> list[Node]: '''simple docstring''' A__ = [] for action in delta: A__ = parent.pos_x + action[1] A__ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowercase_,lowercase_,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,lowercase_,) ) return successors def snake_case__ ( self : List[str],lowercase_ : Node | None )-> Path: '''simple docstring''' A__ = node A__ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A__ = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase_ = (0, 0) lowercase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("------") lowercase_ = GreedyBestFirst(init, goal) lowercase_ = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase_ = 2 for elem in grid: print(elem)
code_codestyle: 7
from typing import Union

import fire
import torch
from tqdm import tqdm


def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "cpu" , SCREAMING_SNAKE_CASE__ : Union[str, None] = None ) -> None:
    '''simple docstring'''
    A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        A__ = v.half()
    if save_path is None:  # overwrite src_path
        A__ = src_path
    torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    fire.Fire(convert)
style_context_codestyle: 7
label: 1
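# Tiny in-memory sketch of the fp16 conversion above (assumes torch is installed):
# same keys, tensors re-stored at half precision.
import torch

state_dict = {"w": torch.randn(2, 2), "b": torch.zeros(2)}
halved = {k: v.half() for k, v in state_dict.items()}
print({k: v.dtype for k, v in halved.items()})  # {'w': torch.float16, 'b': torch.float16}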
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = "▁" lowercase_ = {"vocab_file": "sentencepiece.bpe.model"} lowercase_ = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } lowercase_ = { "facebook/mbart-large-50-one-to-many-mmt": 1024, } # fmt: off lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = ['input_ids', 'attention_mask'] lowerCamelCase = [] lowerCamelCase = [] def __init__( self : List[Any],lowercase_ : Optional[Any],lowercase_ : List[Any]=None,lowercase_ : List[str]=None,lowercase_ : List[Any]="</s>",lowercase_ : List[Any]="</s>",lowercase_ : Dict="<s>",lowercase_ : Union[str, Any]="<unk>",lowercase_ : Dict="<pad>",lowercase_ : str="<mask>",lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : List[Any],)-> None: '''simple docstring''' A__ = AddedToken(lowercase_,lstrip=lowercase_,rstrip=lowercase_ ) if isinstance(lowercase_,lowercase_ ) else mask_token A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = kwargs.get('additional_special_tokens',[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowercase_,tgt_lang=lowercase_,eos_token=lowercase_,unk_token=lowercase_,sep_token=lowercase_,cls_token=lowercase_,pad_token=lowercase_,mask_token=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,) A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase_ ) ) A__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A__ = 1 A__ = len(self.sp_model ) A__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ ) } A__ = {v: k for k, v in self.lang_code_to_id.items()} A__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} A__ = src_lang if src_lang is not None else 'en_XX' A__ = self.lang_code_to_id[self._src_lang] A__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def snake_case__ ( self : Tuple )-> int: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' return self._src_lang @src_lang.setter def snake_case__ ( self : List[Any],lowercase_ : str )-> None: '''simple docstring''' A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Any )-> Dict: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : Tuple,lowercase_ : Dict )-> None: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : List[str] )-> Dict: '''simple docstring''' A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def snake_case__ ( self : int,lowercase_ : str )-> List[str]: '''simple docstring''' return self.sp_model.encode(lowercase_,out_type=lowercase_ ) def snake_case__ ( self : Optional[Any],lowercase_ : str )-> int: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A__ = self.sp_model.PieceToId(lowercase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def snake_case__ ( self : Optional[int],lowercase_ : int )-> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def snake_case__ ( self : Optional[int],lowercase_ : Tuple )-> List[str]: '''simple docstring''' A__ = [] A__ = '' A__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase_ ) + token A__ = True A__ = [] else: current_sub_tokens.append(lowercase_ ) A__ = False out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def snake_case__ ( self : Any,lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,lowercase_ 
) elif not os.path.isfile(self.vocab_file ): with open(lowercase_,'wb' ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,) def snake_case__ ( self : int,lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones def snake_case__ ( self : Union[str, Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None )-> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def snake_case__ ( self : Optional[int],lowercase_ : Dict,lowercase_ : str,lowercase_ : Optional[str],lowercase_ : Optional[str],**lowercase_ : Any )-> Union[str, Any]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) A__ = src_lang A__ = self(lowercase_,add_special_tokens=lowercase_,return_tensors=lowercase_,**lowercase_ ) A__ = self.convert_tokens_to_ids(lowercase_ ) A__ = tgt_lang_id return inputs def snake_case__ ( self : List[Any],lowercase_ : List[str],lowercase_ : str = "en_XX",lowercase_ : Optional[List[str]] = None,lowercase_ : str = "ro_RO",**lowercase_ : Optional[int],)-> BatchEncoding: '''simple docstring''' A__ = src_lang A__ = tgt_lang return super().prepare_seqaseq_batch(lowercase_,lowercase_,**lowercase_ ) def snake_case__ ( self : Any )-> List[str]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def snake_case__ ( self : int )-> Tuple: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def snake_case__ ( self : List[str],lowercase_ : str )-> None: '''simple docstring''' A__ = self.lang_code_to_id[src_lang] A__ = [self.cur_lang_code_id] A__ = [self.eos_token_id] def snake_case__ ( self : Union[str, Any],lowercase_ : str )-> None: '''simple docstring''' A__ = self.lang_code_to_id[tgt_lang] A__ = [self.cur_lang_code_id] A__ = [self.eos_token_id]
code_codestyle: 7
import os

# Precomputes a list of the 100 first triangular numbers
lowercase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def _snake_case( ) -> int:
    '''simple docstring'''
    A__ = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE__ ) )
    A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'words.txt' )
    A__ = ''
    with open(SCREAMING_SNAKE_CASE__ ) as f:
        A__ = f.readline()
    A__ = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    A__ = [
        word
        for word in [sum(ord(SCREAMING_SNAKE_CASE__ ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 7
label: 1
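# Worked example of the word-value rule in the triangular-words snippet above:
# ord(ch) - 64 maps 'A' -> 1, so "SKY" -> 19 + 11 + 25 = 55 = T10, a triangular number.
word = "SKY"
value = sum(ord(ch) - 64 for ch in word)
triangular = [n * (n + 1) // 2 for n in range(1, 101)]
print(value, value in triangular)  # 55 True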
def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ) -> float:
    '''simple docstring'''
    _validate_point(SCREAMING_SNAKE_CASE__ )
    _validate_point(SCREAMING_SNAKE_CASE__ )
    if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )


def _snake_case( SCREAMING_SNAKE_CASE__ : list[float] ) -> None:
    '''simple docstring'''
    if point:
        if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            for item in point:
                if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ):
                    A__ = (
                        'Expected a list of numbers as input, found '
                        f'{type(SCREAMING_SNAKE_CASE__ ).__name__}'
                    )
                    raise TypeError(SCREAMING_SNAKE_CASE__ )
        else:
            A__ = f'Expected a list of numbers as input, found {type(SCREAMING_SNAKE_CASE__ ).__name__}'
            raise TypeError(SCREAMING_SNAKE_CASE__ )
    else:
        raise ValueError('Missing an input' )


def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ) -> float:
    '''simple docstring'''
    _validate_point(SCREAMING_SNAKE_CASE__ )
    _validate_point(SCREAMING_SNAKE_CASE__ )
    if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 7
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionAttendAndExcitePipeline lowerCamelCase = False lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def snake_case__ ( cls : Any )-> Optional[Any]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Dict: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,) A__ = DDIMScheduler( beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = A__ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 6_4, 6_4, 3) ) A__ = np.array( 
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def snake_case__ ( self : str )-> int: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Any )-> Optional[int]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : int )-> List[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = torch.manual_seed(5_1 ) A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa ) pipe.to('cuda' ) A__ = 'a painting of an elephant with glasses' A__ = [5, 7] A__ = pipe( prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0] A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
style_context_codestyle: 7
label: 1
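# Quick check of the Manhattan distance above: |1 - 3| + |1 - 4| = 5 for the
# classic (1, 1) -> (3, 4) example.
print(float(sum(abs(a - b) for a, b in zip([1, 1], [3, 4]))))  # 5.0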
from manim import * class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : Any )-> Optional[int]: '''simple docstring''' A__ = Rectangle(height=0.5,width=0.5 ) A__ = Rectangle(height=0.25,width=0.25 ) A__ = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('CPU',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase_ ) A__ = [mem.copy() for i in range(4 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('GPU',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase_ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('Model',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) model.move_to([3, -1.0, 0] ) self.add(lowercase_ ) A__ = [] A__ = [] A__ = [] for i, rect in enumerate(lowercase_ ): rect.set_stroke(lowercase_ ) A__ = Rectangle(height=0.46 / 4,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_,opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.02,direction=lowercase_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0],direction=lowercase_,buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1],direction=lowercase_,buff=0.0 ) self.add(lowercase_ ) model_cpu_arr.append(lowercase_ ) self.add(*lowercase_,*lowercase_,*lowercase_ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('Loaded Checkpoint',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowercase_ ) A__ = [] A__ = [] for i, rect in enumerate(lowercase_ ): A__ = fill.copy().set_fill(lowercase_,opacity=0.7 ) target.move_to(lowercase_ ) ckpt_arr.append(lowercase_ ) A__ = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowercase_ ) self.add(*lowercase_,*lowercase_ ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',font_size=1_8,) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase_,lowercase_ ) A__ = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint',font_size=1_8,) blue_text.next_to(lowercase_,DOWN * 2.4,aligned_edge=key_text.get_left() ) self.add(lowercase_ ) A__ = MarkupText( F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.',font_size=2_4,) step_a.move_to([2, 2, 0] ) A__ = [meta_mem.copy() for i in range(6 )] A__ = [meta_mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('Disk',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowercase_,run_time=3 
),Write(lowercase_,run_time=1 ),Create(lowercase_,run_time=1 ) ) A__ = [] for i, rect in enumerate(lowercase_ ): A__ = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowercase_,run_time=1.5 ) ) self.play(*lowercase_ ) self.play(FadeOut(lowercase_ ) ) A__ = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.',font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase_,run_time=3 ) ) self.play( FadeOut(lowercase_,lowercase_,*lowercase_,*lowercase_ ),) self.wait()
7
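The scene above builds all of its memory diagrams from the same few primitives: rectangles arranged into a strip with a caption. A stripped-down sketch of that building block, assuming the community edition of manim:

from manim import DOWN, RIGHT, Group, Rectangle, Scene, Text, VGroup

class MemoryStrip(Scene):
    def construct(self):
        # Six memory cells in a row, captioned below -- the unit the full
        # animation repeats for the CPU, GPU, model and checkpoint strips.
        cells = VGroup(*[Rectangle(height=0.5, width=0.5) for _ in range(6)])
        cells.arrange(RIGHT, buff=0)
        caption = Text("CPU", font_size=24)
        self.add(Group(cells, caption).arrange(DOWN, buff=0.5))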
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowercase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : tuple , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , ) -> Union[str, Any]: '''simple docstring''' output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , use_external_data_format=SCREAMING_SNAKE_CASE__ , enable_onnx_checker=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) else: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool = False ) -> Tuple: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): A__ = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: A__ = 'cpu' A__ = Path(SCREAMING_SNAKE_CASE__ ) # VAE DECODER A__ = AutoencoderKL.from_pretrained(model_path + '/vae' ) A__ = vae_decoder.config.latent_channels # forward only through the decoder part A__ = vae_decoder.decode onnx_export( SCREAMING_SNAKE_CASE__ , model_args=( torch.randn(1 , SCREAMING_SNAKE_CASE__ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=SCREAMING_SNAKE_CASE__ , ) del vae_decoder if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowercase_ = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
7
1
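The converter above wraps torch.onnx.export with a version check; on torch >= 1.11 the call reduces to the second branch. A self-contained sketch of that branch with a stand-in module (TinyDecoder is hypothetical, the argument names mirror the VAE-decoder export):

import torch

class TinyDecoder(torch.nn.Module):
    def forward(self, latent_sample):
        # Stand-in for the VAE decoder: upsample the latent by 8x.
        return torch.nn.functional.interpolate(latent_sample, scale_factor=8)

model = TinyDecoder().eval()
dummy = torch.randn(1, 4, 25, 25)  # same dummy latent shape as in the script

torch.onnx.export(
    model,
    (dummy,),
    "tiny_decoder.onnx",
    input_names=["latent_sample"],
    output_names=["sample"],
    dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
    do_constant_folding=True,
    opset_version=14,
)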
import warnings warnings.warn( "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: " "`from accelerate import find_executable_batch_size` to avoid this warning.", FutureWarning, )
7
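The module above is the standard shim pattern for a moved symbol: re-export it at the old path and warn on import. A generic sketch with hypothetical names (old_utils / new_utils / useful_function):

# old_utils.py -- kept only so existing imports continue to work.
import warnings

from new_utils import useful_function  # noqa: F401  re-export at the old path

warnings.warn(
    "old_utils has been reorganized into new_utils. Import `useful_function` "
    "from new_utils to avoid this warning.",
    FutureWarning,
)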
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
7
1
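Most of the checks above follow one save/reload recipe: build a scheduler, round-trip its config through a temporary directory, and compare stepping afterwards. The core of that recipe in isolation:

import tempfile

from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)  # writes scheduler_config.json
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdir)

assert reloaded.config.solver_order == scheduler.config.solver_order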
def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> str: '''simple docstring''' return "\n".join( f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=10))
7
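A compact restatement of the snippet above with explicit names, plus its expected output:

def multiplication_table(number: int, number_of_terms: int) -> str:
    """First `number_of_terms` lines of `number`'s times table."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )

print(multiplication_table(number=5, number_of_terms=10))
# 5 * 1 = 5
# 5 * 2 = 10
# ...
# 5 * 10 = 50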
class A : """simple docstring""" def __init__( self : Any,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : List[str] )-> List[Any]: '''simple docstring''' A__ = name A__ = value A__ = weight def __repr__( self : int )-> Tuple: '''simple docstring''' return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def snake_case__ ( self : Any )-> str: '''simple docstring''' return self.value def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' return self.name def snake_case__ ( self : Any )-> Dict: '''simple docstring''' return self.weight def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' return self.value / self.weight def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: '''simple docstring''' A__ = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Any: '''simple docstring''' A__ = sorted(SCREAMING_SNAKE_CASE__ , key=SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ) A__ = [] A__ , A__ = 0.0, 0.0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _snake_case( ) -> Any: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
7
1
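The class-based version above sorts by value/weight ratio and takes items while they still fit. The same greedy rule as one function over plain tuples (names here are illustrative):

def greedy_fill(items, max_cost):
    # items are (name, value, weight); take highest value-per-weight first.
    chosen, total_value, total_weight = [], 0.0, 0.0
    for name, value, weight in sorted(items, key=lambda it: it[1] / it[2], reverse=True):
        if total_weight + weight <= max_cost:
            chosen.append(name)
            total_weight += weight
            total_value += value
    return chosen, total_value

print(greedy_fill([("a", 60, 10), ("b", 100, 20), ("c", 120, 30)], 50))
# (['a', 'b'], 160.0) -- "c" has the worst ratio and no longer fits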
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = {"vocab_file": "vocab.txt"} lowercase_ = { "vocab_file": { "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt", }, } lowercase_ = { "openbmb/cpm-ant-10b": 1024, } def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: '''simple docstring''' A__ = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as reader: A__ = reader.readlines() for index, token in enumerate(SCREAMING_SNAKE_CASE__ ): A__ = token.rstrip('\n' ) A__ = index return vocab class A ( _UpperCAmelCase ): """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Any="<unk>",lowercase_ : List[Any]=2_0_0 )-> int: '''simple docstring''' A__ = vocab A__ = unk_token A__ = max_input_chars_per_word def snake_case__ ( self : int,lowercase_ : Any )-> int: '''simple docstring''' A__ = list(lowercase_ ) if len(lowercase_ ) > self.max_input_chars_per_word: return [self.unk_token] A__ = 0 A__ = [] while start < len(lowercase_ ): A__ = len(lowercase_ ) A__ = None while start < end: A__ = ''.join(chars[start:end] ) if substr in self.vocab: A__ = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowercase_ ) A__ = end return sub_tokens class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['input_ids', 'attention_mask'] lowerCamelCase = False def __init__( self : str,lowercase_ : List[str],lowercase_ : Dict="<d>",lowercase_ : int="</d>",lowercase_ : int="<s>",lowercase_ : Optional[Any]="</s>",lowercase_ : Optional[Any]="<pad>",lowercase_ : Optional[int]="<unk>",lowercase_ : Any="</n>",lowercase_ : Dict="</_>",lowercase_ : Optional[int]="left",**lowercase_ : int,)-> int: '''simple docstring''' requires_backends(self,['jieba'] ) super().__init__( bod_token=lowercase_,eod_token=lowercase_,bos_token=lowercase_,eos_token=lowercase_,pad_token=lowercase_,unk_token=lowercase_,line_token=lowercase_,space_token=lowercase_,padding_side=lowercase_,**lowercase_,) A__ = bod_token A__ = eod_token A__ = load_vocab(lowercase_ ) A__ = self.encoder[space_token] A__ = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] A__ = collections.OrderedDict(sorted(self.encoder.items(),key=lambda lowercase_ : x[1] ) ) A__ = {v: k for k, v in self.encoder.items()} A__ = WordpieceTokenizer(vocab=self.encoder,unk_token=self.unk_token ) @property def snake_case__ ( self : Optional[Any] )-> Any: '''simple docstring''' return self.encoder[self.bod_token] @property def snake_case__ ( self : List[str] )-> Dict: '''simple docstring''' return self.encoder[self.eod_token] @property def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' return self.encoder["\n"] @property def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' return len(self.encoder ) def snake_case__ ( self : int )-> str: '''simple docstring''' return dict(self.encoder,**self.added_tokens_encoder ) def snake_case__ ( self : Union[str, Any],lowercase_ : Union[str, Any] )-> str: '''simple docstring''' 
A__ = [] for x in jieba.cut(lowercase_,cut_all=lowercase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase_ ) ) return output_tokens def snake_case__ ( self : List[str],lowercase_ : Optional[Any],**lowercase_ : int )-> int: '''simple docstring''' A__ = [i for i in token_ids if i >= 0] A__ = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowercase_,**lowercase_ ) def snake_case__ ( self : Optional[int],lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' return token in self.encoder def snake_case__ ( self : List[str],lowercase_ : List[str] )-> str: '''simple docstring''' return "".join(lowercase_ ) def snake_case__ ( self : Optional[Any],lowercase_ : List[Any] )-> Any: '''simple docstring''' return self.encoder.get(lowercase_,self.encoder.get(self.unk_token ) ) def snake_case__ ( self : Union[str, Any],lowercase_ : List[str] )-> List[str]: '''simple docstring''' return self.decoder.get(lowercase_,self.unk_token ) def snake_case__ ( self : Any,lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]: '''simple docstring''' if os.path.isdir(lowercase_ ): A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: A__ = (filename_prefix + '-' if filename_prefix else '') + save_directory A__ = 0 if " " in self.encoder: A__ = self.encoder[' '] del self.encoder[" "] if "\n" in self.encoder: A__ = self.encoder['\n'] del self.encoder["\n"] A__ = collections.OrderedDict(sorted(self.encoder.items(),key=lambda lowercase_ : x[1] ) ) with open(lowercase_,'w',encoding='utf-8' ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' ' Please check that the vocabulary is not corrupted!' ) A__ = token_index writer.write(token + '\n' ) index += 1 return (vocab_file,) def snake_case__ ( self : Optional[int],lowercase_ : List[int],lowercase_ : List[int] = None )-> List[int]: '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def snake_case__ ( self : Optional[Any],lowercase_ : List[int],lowercase_ : Optional[List[int]] = None,lowercase_ : bool = False )-> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_,token_ids_a=lowercase_,already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) return [1] + ([0] * len(lowercase_ ))
7
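The WordpieceTokenizer above implements greedy longest-match-first segmentation: shrink the candidate substring from the right until it appears in the vocabulary, emit it, and continue from where it ended. The same loop in free-standing form:

def wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1  # shrink from the right until a vocabulary hit
        if end == start:
            tokens.append(unk)  # nothing matched: emit <unk>, skip one character
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

print(wordpiece("unhappy", {"un", "hap", "py", "happy"}))  # ['un', 'happy']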
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class A ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'resnet' lowerCamelCase = ['basic', 'bottleneck'] def __init__( self : Optional[Any],lowercase_ : int=3,lowercase_ : List[str]=6_4,lowercase_ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8],lowercase_ : Tuple=[3, 4, 6, 3],lowercase_ : Union[str, Any]="bottleneck",lowercase_ : List[str]="relu",lowercase_ : Tuple=False,lowercase_ : List[str]=None,lowercase_ : List[Any]=None,**lowercase_ : str,)-> Optional[Any]: '''simple docstring''' super().__init__(**lowercase_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) A__ = num_channels A__ = embedding_size A__ = hidden_sizes A__ = depths A__ = layer_type A__ = hidden_act A__ = downsample_in_first_stage A__ = ['stem'] + [F'stage{idx}' for idx in range(1,len(lowercase_ ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=lowercase_,out_indices=lowercase_,stage_names=self.stage_names ) class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = version.parse('1.11' ) @property def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def snake_case__ ( self : Any )-> float: '''simple docstring''' return 1E-3
7
1
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowercase_ = "true" def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) A__ = RegressionModel() A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) A__ = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ): A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): A__ = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) A__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str: '''simple docstring''' A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) A__ = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' A__ = [] for batch in dataloader: A__ , A__ = batch.values() with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A__ , A__ = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]: 
'''simple docstring''' A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str: '''simple docstring''' A__ = evaluate.load('glue' , 'mrpc' ) A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline A__ , A__ , A__ = setup['no'] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] ) A__ = metric.compute() # Then do distributed A__ , A__ , A__ = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) A__ = batch['labels'] A__ , A__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) A__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _snake_case( ) -> Optional[Any]: '''simple docstring''' A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) A__ = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 ) accelerator.state._reset_state() def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' main() if __name__ == "__main__": main()
7
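The test above checks that gather_for_metrics returns exactly num_samples predictions even though the distributed sampler pads the final batch. A simplified back-of-the-envelope model (not accelerate's actual implementation) of how many padded duplicates get trimmed:

import math

def padded_total(num_samples: int, batch_size: int, num_processes: int) -> int:
    # Every process must see the same number of batches, so the sampler pads
    # the dataset up to a multiple of batch_size * num_processes.
    batches_per_process = math.ceil(num_samples / (batch_size * num_processes))
    return batches_per_process * batch_size * num_processes

n, bs, world = 82, 16, 2
print(padded_total(n, bs, world) - n)  # 14 duplicates for gather_for_metrics to drop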
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 't5' lowerCamelCase = ['past_key_values'] lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Union[str, Any],lowercase_ : int=3_2_1_2_8,lowercase_ : int=5_1_2,lowercase_ : List[str]=6_4,lowercase_ : Tuple=2_0_4_8,lowercase_ : Any=6,lowercase_ : List[str]=None,lowercase_ : Union[str, Any]=8,lowercase_ : int=3_2,lowercase_ : Dict=1_2_8,lowercase_ : Optional[int]=0.1,lowercase_ : List[str]=1E-6,lowercase_ : Tuple=1.0,lowercase_ : Any="relu",lowercase_ : Union[str, Any]=True,lowercase_ : Optional[Any]=True,lowercase_ : int=0,lowercase_ : str=1,**lowercase_ : str,)-> Any: '''simple docstring''' A__ = vocab_size A__ = d_model A__ = d_kv A__ = d_ff A__ = num_layers A__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry A__ = num_heads A__ = relative_attention_num_buckets A__ = relative_attention_max_distance A__ = dropout_rate A__ = layer_norm_epsilon A__ = initializer_factor A__ = feed_forward_proj A__ = use_cache A__ = self.feed_forward_proj.split('-' ) A__ = act_info[-1] A__ = act_info[0] == 'gated' if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": A__ = 'gelu_new' super().__init__( pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,) class A ( _UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self : Tuple )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' A__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: A__ = 'past_encoder_sequence + sequence' A__ = {0: 'batch'} A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: A__ = {0: 'batch', 1: 'decoder_sequence'} A__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_,direction='inputs' ) return common_inputs @property def snake_case__ ( self : Any )-> int: '''simple docstring''' return 1_3
7
1
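The validation in the config above encodes a tiny grammar: feed_forward_proj is either ACT_FN or gated-ACT_FN. Extracted as a pure function:

def parse_ffn_activation(feed_forward_proj: str):
    parts = feed_forward_proj.split("-")
    if len(parts) > 2 or (len(parts) == 2 and parts[0] != "gated"):
        raise ValueError(
            f"{feed_forward_proj!r} must be `ACT_FN` or `gated-ACT_FN`, e.g. 'relu' or 'gated-gelu'"
        )
    return parts[-1], parts[0] == "gated"  # (activation, is_gated)

print(parse_ffn_activation("gated-gelu"))  # ('gelu', True)
print(parse_ffn_activation("relu"))        # ('relu', False)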
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 42 @flax_register_to_config class A ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 32 lowerCamelCase = 4 lowerCamelCase = 4 lowerCamelCase = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) lowerCamelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") lowerCamelCase = False lowerCamelCase = (3_20, 6_40, 12_80, 12_80) lowerCamelCase = 2 lowerCamelCase = 8 lowerCamelCase = None lowerCamelCase = 12_80 lowerCamelCase = 0.0 lowerCamelCase = False lowerCamelCase = jnp.floataa lowerCamelCase = True lowerCamelCase = 0 lowerCamelCase = False def snake_case__ ( self : str,lowercase_ : jax.random.KeyArray )-> FrozenDict: '''simple docstring''' A__ = (1, self.in_channels, self.sample_size, self.sample_size) A__ = jnp.zeros(lowercase_,dtype=jnp.floataa ) A__ = jnp.ones((1,),dtype=jnp.intaa ) A__ = jnp.zeros((1, 1, self.cross_attention_dim),dtype=jnp.floataa ) A__ , A__ = jax.random.split(lowercase_ ) A__ = {'params': params_rng, 'dropout': dropout_rng} return self.init(lowercase_,lowercase_,lowercase_,lowercase_ )["params"] def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = self.block_out_channels A__ = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
A__ = self.num_attention_heads or self.attention_head_dim # input A__ = nn.Conv( block_out_channels[0],kernel_size=(3, 3),strides=(1, 1),padding=((1, 1), (1, 1)),dtype=self.dtype,) # time A__ = FlaxTimesteps( block_out_channels[0],flip_sin_to_cos=self.flip_sin_to_cos,freq_shift=self.config.freq_shift ) A__ = FlaxTimestepEmbedding(lowercase_,dtype=self.dtype ) A__ = self.only_cross_attention if isinstance(lowercase_,lowercase_ ): A__ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(lowercase_,lowercase_ ): A__ = (num_attention_heads,) * len(self.down_block_types ) # down A__ = [] A__ = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): A__ = output_channel A__ = block_out_channels[i] A__ = i == len(lowercase_ ) - 1 if down_block_type == "CrossAttnDownBlock2D": A__ = FlaxCrossAttnDownBlockaD( in_channels=lowercase_,out_channels=lowercase_,dropout=self.dropout,num_layers=self.layers_per_block,num_attention_heads=num_attention_heads[i],add_downsample=not is_final_block,use_linear_projection=self.use_linear_projection,only_cross_attention=only_cross_attention[i],use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,) else: A__ = FlaxDownBlockaD( in_channels=lowercase_,out_channels=lowercase_,dropout=self.dropout,num_layers=self.layers_per_block,add_downsample=not is_final_block,dtype=self.dtype,) down_blocks.append(lowercase_ ) A__ = down_blocks # mid A__ = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1],dropout=self.dropout,num_attention_heads=num_attention_heads[-1],use_linear_projection=self.use_linear_projection,use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,) # up A__ = [] A__ = list(reversed(lowercase_ ) ) A__ = list(reversed(lowercase_ ) ) A__ = list(reversed(lowercase_ ) ) A__ = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): A__ = output_channel A__ = reversed_block_out_channels[i] A__ = reversed_block_out_channels[min(i + 1,len(lowercase_ ) - 1 )] A__ = i == len(lowercase_ ) - 1 if up_block_type == "CrossAttnUpBlock2D": A__ = FlaxCrossAttnUpBlockaD( in_channels=lowercase_,out_channels=lowercase_,prev_output_channel=lowercase_,num_layers=self.layers_per_block + 1,num_attention_heads=reversed_num_attention_heads[i],add_upsample=not is_final_block,dropout=self.dropout,use_linear_projection=self.use_linear_projection,only_cross_attention=only_cross_attention[i],use_memory_efficient_attention=self.use_memory_efficient_attention,dtype=self.dtype,) else: A__ = FlaxUpBlockaD( in_channels=lowercase_,out_channels=lowercase_,prev_output_channel=lowercase_,num_layers=self.layers_per_block + 1,add_upsample=not is_final_block,dropout=self.dropout,dtype=self.dtype,) up_blocks.append(lowercase_ ) A__ = output_channel A__ = up_blocks # out A__ = nn.GroupNorm(num_groups=3_2,epsilon=1E-5 ) A__ = nn.Conv( self.out_channels,kernel_size=(3, 3),strides=(1, 1),padding=((1, 1), (1, 1)),dtype=self.dtype,) def __call__( self : Any,lowercase_ : str,lowercase_ : List[str],lowercase_ : List[Any],lowercase_ : Dict=None,lowercase_ : List[str]=None,lowercase_ : bool = True,lowercase_ : bool = False,)-> Union[FlaxUNetaDConditionOutput, Tuple]: '''simple docstring''' if not isinstance(lowercase_,jnp.ndarray ): A__ = jnp.array([timesteps],dtype=jnp.intaa ) elif isinstance(lowercase_,jnp.ndarray ) and len(timesteps.shape ) == 0: A__ = timesteps.astype(dtype=jnp.floataa ) A__ = jnp.expand_dims(lowercase_,0 ) A__ = self.time_proj(lowercase_ ) A__ = 
self.time_embedding(lowercase_ ) # 2. pre-process A__ = jnp.transpose(lowercase_,(0, 2, 3, 1) ) A__ = self.conv_in(lowercase_ ) # 3. down A__ = (sample,) for down_block in self.down_blocks: if isinstance(lowercase_,lowercase_ ): A__ , A__ = down_block(lowercase_,lowercase_,lowercase_,deterministic=not train ) else: A__ , A__ = down_block(lowercase_,lowercase_,deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: A__ = () for down_block_res_sample, down_block_additional_residual in zip( lowercase_,lowercase_ ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) A__ = new_down_block_res_samples # 4. mid A__ = self.mid_block(lowercase_,lowercase_,lowercase_,deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: A__ = down_block_res_samples[-(self.layers_per_block + 1) :] A__ = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(lowercase_,lowercase_ ): A__ = up_block( lowercase_,temb=lowercase_,encoder_hidden_states=lowercase_,res_hidden_states_tuple=lowercase_,deterministic=not train,) else: A__ = up_block(lowercase_,temb=lowercase_,res_hidden_states_tuple=lowercase_,deterministic=not train ) # 6. post-process A__ = self.conv_norm_out(lowercase_ ) A__ = nn.silu(lowercase_ ) A__ = self.conv_out(lowercase_ ) A__ = jnp.transpose(lowercase_,(0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=lowercase_ )
7
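One easily missed detail in the forward pass above: flax's nn.Conv is channels-last, while diffusers tensors arrive channels-first, so the model transposes on the way in and back on the way out. In isolation:

import jax.numpy as jnp

x_nchw = jnp.ones((1, 4, 64, 64))             # (batch, channels, height, width)
x_nhwc = jnp.transpose(x_nchw, (0, 2, 3, 1))  # to channels-last for nn.Conv
# ... all convolutions and blocks run in NHWC ...
back = jnp.transpose(x_nhwc, (0, 3, 1, 2))    # restore NCHW before returning
assert back.shape == x_nchw.shape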
def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: A__ = mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: A__ = max( mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , mf_knapsack(i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , j - wt[i - 1] ) + val[i - 1] , ) A__ = val return f[i][j] def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: '''simple docstring''' A__ = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: A__ = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: A__ = dp[i - 1][w_] return dp[n][w_], dp def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list ) -> Union[str, Any]: '''simple docstring''' if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) )): raise ValueError( 'Both the weights and values vectors must be either lists or tuples' ) A__ = len(SCREAMING_SNAKE_CASE__ ) if num_items != len(SCREAMING_SNAKE_CASE__ ): A__ = ( 'The number of weights must be the same as the number of values.\n' f'But got {num_items} weights and {len(SCREAMING_SNAKE_CASE__ )} values' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ ): if not isinstance(wt[i] , SCREAMING_SNAKE_CASE__ ): A__ = ( 'All weights must be integers but got weight of ' f'type {type(wt[i] )} at index {i}' ) raise TypeError(SCREAMING_SNAKE_CASE__ ) A__ , A__ = knapsack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = set() _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return optimal_val, example_optional_set def _snake_case( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : set ) -> Optional[int]: '''simple docstring''' if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: optimal_set.add(SCREAMING_SNAKE_CASE__ ) _construct_solution(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , i - 1 , j - wt[i - 1] , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = [3, 2, 4, 4] lowercase_ = [4, 3, 2, 3] lowercase_ = 4 lowercase_ = 6 lowercase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] lowercase_ , lowercase_ = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 lowercase_ , lowercase_ = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("optimal_value = ", optimal_solution) print("An optimal subset corresponding to the optimal value", optimal_subset)
7
1
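The table-filling knapsack above, restated compactly and checked against the example run at the bottom of the snippet (reading the two lists as values [3, 2, 4, 4] and weights [4, 3, 2, 3], which reproduces the asserted optimum of 8):

def knapsack(capacity, weights, values):
    # O(n * capacity) 0/1 knapsack over an (n + 1) x (capacity + 1) table.
    n = len(weights)
    dp = [[0] * (capacity + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w in range(1, capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i
            if weights[i - 1] <= w:  # or take it, if it fits
                dp[i][w] = max(dp[i][w], dp[i - 1][w - weights[i - 1]] + values[i - 1])
    return dp[n][capacity]

print(knapsack(6, [4, 3, 2, 3], [3, 2, 4, 4]))  # 8, from the items weighing 2 and 3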
import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig lowercase_ = logging.get_logger(__name__) lowercase_ = "T5Config" def _snake_case( SCREAMING_SNAKE_CASE__ : jnp.array , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> jnp.ndarray: '''simple docstring''' A__ = jnp.zeros_like(SCREAMING_SNAKE_CASE__ ) A__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) A__ = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE__ ) A__ = jnp.where(shifted_input_ids == -100 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return shifted_input_ids class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'mt5' lowerCamelCase = MTaConfig class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'mt5' lowerCamelCase = MTaConfig class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'mt5' lowerCamelCase = MTaConfig
7
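The shift helper above is the standard seq2seq trick: decoder inputs are the labels shifted one position right with a start token prepended, and -100 (loss-ignored) positions mapped back to padding. A numpy rendition:

import numpy as np

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)

print(shift_tokens_right(np.array([[5, 6, -100]]), pad_token_id=0, decoder_start_token_id=0))
# [[0 5 6]]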
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
7
1
def _snake_case( SCREAMING_SNAKE_CASE__ : int = 1000 ) -> int: '''simple docstring''' A__ = 2**power A__ = 0 while n: A__ , A__ = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
7
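The digit loop above is Project Euler 16; since Python has arbitrary-precision integers, the whole computation also fits in one line:

print(sum(int(digit) for digit in str(2**1000)))  # 1366, matching solution(1000)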
from typing import Dict from .base import GenericTensor, Pipeline class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : int,lowercase_ : Dict=None,lowercase_ : Tuple=None,lowercase_ : List[Any]=None,**lowercase_ : Any )-> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: A__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) A__ = truncation A__ = tokenize_kwargs A__ = {} if return_tensors is not None: A__ = return_tensors return preprocess_params, {}, postprocess_params def snake_case__ ( self : Dict,lowercase_ : List[Any],**lowercase_ : Tuple )-> Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework A__ = self.tokenizer(lowercase_,return_tensors=lowercase_,**lowercase_ ) return model_inputs def snake_case__ ( self : Tuple,lowercase_ : int )-> Optional[Any]: '''simple docstring''' A__ = self.model(**lowercase_ ) return model_outputs def snake_case__ ( self : Tuple,lowercase_ : Tuple,lowercase_ : List[str]=False )-> Any: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any],*lowercase_ : int,**lowercase_ : Optional[Any] )-> int: '''simple docstring''' return super().__call__(*lowercase_,**lowercase_ )
7
1
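The class above is what transformers instantiates for the 'feature-extraction' task. A minimal driver (the checkpoint name is just an example; any encoder model works):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test")  # nested lists: batch x tokens x hidden
print(len(features[0]), len(features[0][0]))  # num_tokens, hidden_size (768 here)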
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _snake_case( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> np.ndarray: '''simple docstring''' if (ksize % 2) == 0: A__ = ksize + 1 A__ = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(SCREAMING_SNAKE_CASE__ ): for x in range(SCREAMING_SNAKE_CASE__ ): # distance from center A__ = x - ksize // 2 A__ = y - ksize // 2 # degree to radiant A__ = theta / 180 * np.pi A__ = np.cos(_theta ) A__ = np.sin(_theta ) # get kernel x A__ = cos_theta * px + sin_theta * py # get kernel y A__ = -sin_theta * px + cos_theta * py # fill kernel A__ = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image lowercase_ = imread("../image_data/lena.jpg") # turn image in gray scale value lowercase_ = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges lowercase_ = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: lowercase_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) lowercase_ = out / out.max() * 255 lowercase_ = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
7
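The kernel fill above evaluates the rotated-Gaussian-times-cosine Gabor expression at each pixel offset. The per-point formula, isolated:

import numpy as np

def gabor_value(px, py, sigma, theta, lambd, gamma, psi):
    t = np.deg2rad(theta)
    x = np.cos(t) * px + np.sin(t) * py    # rotate the offset into the filter frame
    y = -np.sin(t) * px + np.cos(t) * py
    return np.exp(-(x**2 + gamma**2 * y**2) / (2 * sigma**2)) * np.cos(
        2 * np.pi * x / lambd + psi
    )

print(gabor_value(0, 0, sigma=10, theta=0, lambd=10, gamma=0.5, psi=0))  # 1.0 at the centre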
from timeit import timeit def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int: '''simple docstring''' if number < 0: raise ValueError('the value of input must not be negative' ) A__ = 0 while number: number &= number - 1 result += 1 return result def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> int: '''simple docstring''' if number < 0: raise ValueError('the value of input must not be negative' ) A__ = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def _snake_case( ) -> None: '''simple docstring''' def do_benchmark(SCREAMING_SNAKE_CASE__ : int ) -> None: A__ = 'import __main__ as z' print(f'Benchmark when {number = }:' ) print(f'{get_set_bits_count_using_modulo_operator(SCREAMING_SNAKE_CASE__ ) = }' ) A__ = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=SCREAMING_SNAKE_CASE__ ) print(f'timeit() runs in {timing} seconds' ) print(f'{get_set_bits_count_using_brian_kernighans_algorithm(SCREAMING_SNAKE_CASE__ ) = }' ) A__ = timeit( 'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=SCREAMING_SNAKE_CASE__ , ) print(f'timeit() runs in {timing} seconds' ) for number in (25, 37, 58, 0): do_benchmark(SCREAMING_SNAKE_CASE__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
7
1
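Python 3.10+ has a built-in popcount, int.bit_count(), which serves as an oracle for the two counting strategies above. A standard-library-only sanity sketch:

# Sanity check: int.bit_count() should agree with both implementations above.
for number in (0, 1, 25, 37, 58, 2**31 - 1):
    expected = number.bit_count()          # Python 3.10+
    assert expected == bin(number).count("1")  # portable pre-3.10 equivalent
    print(number, expected)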
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : str,lowercase_ : Optional[int]=3,lowercase_ : Optional[Any]=3_2,lowercase_ : str=3,lowercase_ : List[str]=1_0,lowercase_ : List[Any]=[1_0, 2_0, 3_0, 4_0],lowercase_ : Dict=[1, 1, 2, 1],lowercase_ : List[str]=True,lowercase_ : Tuple=True,lowercase_ : Optional[Any]="relu",lowercase_ : Tuple=3,lowercase_ : Any=None,)-> Tuple: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = embeddings_size A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = hidden_act A__ = num_labels A__ = scope A__ = len(lowercase_ ) def snake_case__ ( self : Any )-> List[Any]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = self.get_config() return config, pixel_values def snake_case__ ( self : Any )-> Dict: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,image_size=self.image_size,) def snake_case__ ( self : List[Any],lowercase_ : Optional[Any],lowercase_ : List[str] )-> List[str]: '''simple docstring''' A__ = FlaxRegNetModel(config=lowercase_ ) A__ = model(lowercase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),) def snake_case__ ( self : Any,lowercase_ : int,lowercase_ : List[str] )-> Optional[Any]: '''simple docstring''' A__ = self.num_labels A__ = FlaxRegNetForImageClassification(config=lowercase_ ) A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : Tuple )-> str: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : Tuple )-> None: '''simple docstring''' A__ = FlaxRegNetModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def snake_case__ ( self : List[str] )-> int: '''simple docstring''' return def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @unittest.skip(reason='RegNet does not use inputs_embeds' ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' pass def snake_case__ ( self : int )-> Dict: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1],lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: '''simple docstring''' def check_hidden_states_output(lowercase_ : List[str],lowercase_ : List[Any],lowercase_ : Optional[int] ): A__ = model_class(lowercase_ ) A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(lowercase_ ),expected_num_stages + 1 ) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(lowercase_,lowercase_,lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A__ = self._prepare_for_class(lowercase_,lowercase_ ) A__ = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ : int,**lowercase_ : Dict ): return model(pixel_values=lowercase_,**lowercase_ ) with self.subTest('JIT Enabled' ): A__ = model_jitted(**lowercase_ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): A__ = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ),len(lowercase_ ) ) for jitted_output, output in zip(lowercase_,lowercase_ ): self.assertEqual(jitted_output.shape,output.shape ) def _snake_case( ) -> Dict: '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : Dict )-> Optional[int]: '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None @slow def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase_,return_tensors='np' ) A__ = model(**lowercase_ ) # verify the logits A__ = (1, 
1_0_0_0) self.assertEqual(outputs.logits.shape,lowercase_ ) A__ = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3],lowercase_,atol=1E-4 ) )
7
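A minimal inference sketch mirroring the slow integration test above; it assumes flax is installed and the facebook/regnet-y-040 checkpoint can be downloaded.

# Hedged inference sketch for the Flax RegNet classes tested above.
from PIL import Image
from transformers import AutoImageProcessor, FlaxRegNetForImageClassification

model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
outputs = model(**processor(images=image, return_tensors="np"))
print(outputs.logits.shape)  # (1, 1000)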
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' A__ = 384 A__ = 7 if "tiny" in model_name: A__ = 96 A__ = (2, 2, 6, 2) A__ = (3, 6, 12, 24) elif "small" in model_name: A__ = 96 A__ = (2, 2, 18, 2) A__ = (3, 6, 12, 24) elif "base" in model_name: A__ = 128 A__ = (2, 2, 18, 2) A__ = (4, 8, 16, 32) A__ = 12 A__ = 512 elif "large" in model_name: A__ = 192 A__ = (2, 2, 18, 2) A__ = (6, 12, 24, 48) A__ = 12 A__ = 768 # set label information A__ = 150 A__ = 'huggingface/label-files' A__ = 'ade20k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = {v: k for k, v in idalabel.items()} A__ = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) A__ = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , ) return config def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: '''simple docstring''' A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 ) A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 ) A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(4 , in_channel // 4 ) A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(in_channel // 4 , 4 ) A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, 
Any]: '''simple docstring''' A__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } A__ = model_name_to_url[model_name] A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ ) A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "bn" in key: A__ = key.replace('bn' , 'batch_norm' ) A__ = val # rename keys A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ ) if "norm" in key: A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify on image A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' ) A__ = SegformerImageProcessor() A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
7
1
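The core trick in read_in_q_k_v above is slicing a fused qkv projection into separate query/key/value weights. A self-contained toy sketch of that slicing; the dimension is illustrative, while real checkpoints use the per-stage hidden size.

# Toy sketch of the fused-qkv split performed by read_in_q_k_v above.
import torch

dim = 96
in_proj_weight = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
in_proj_bias = torch.randn(3 * dim)

query_w = in_proj_weight[:dim, :]
key_w = in_proj_weight[dim : 2 * dim, :]
value_w = in_proj_weight[2 * dim :, :]
query_b, key_b, value_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[2 * dim :]
# the three slices partition the fused matrix exactly
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)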
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class A : """simple docstring""" @staticmethod def snake_case__ ( *lowercase_ : Tuple,**lowercase_ : List[str] )-> Optional[Any]: '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class A ( unittest.TestCase ): """simple docstring""" lowerCamelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def snake_case__ ( self : Dict,lowercase_ : Union[str, Any],lowercase_ : Optional[Any],lowercase_ : Tuple )-> int: '''simple docstring''' A__ = pipeline('visual-question-answering',model='hf-internal-testing/tiny-vilt-random-vqa' ) A__ = [ { 'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'question': 'How many cats are there?', }, { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'question': 'How many cats are there?', }, ] return vqa_pipeline, examples def snake_case__ ( self : Optional[Any],lowercase_ : List[str],lowercase_ : Dict )-> Tuple: '''simple docstring''' A__ = vqa_pipeline(lowercase_,top_k=1 ) self.assertEqual( lowercase_,[ [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}], [{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}], ],) @require_torch def snake_case__ ( self : List[str] )-> Optional[int]: '''simple docstring''' A__ = pipeline('visual-question-answering',model='hf-internal-testing/tiny-vilt-random-vqa' ) A__ = './tests/fixtures/tests_samples/COCO/000000039769.png' A__ = 'How many cats are there?' A__ = vqa_pipeline(image=lowercase_,question='How many cats are there?',top_k=2 ) self.assertEqual( lowercase_,[{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] ) A__ = vqa_pipeline({'image': image, 'question': question},top_k=2 ) self.assertEqual( lowercase_,[{'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}, {'score': ANY(lowercase_ ), 'answer': ANY(lowercase_ )}] ) @slow @require_torch def snake_case__ ( self : Dict )-> Optional[int]: '''simple docstring''' A__ = pipeline('visual-question-answering',model='dandelin/vilt-b32-finetuned-vqa' ) A__ = './tests/fixtures/tests_samples/COCO/000000039769.png' A__ = 'How many cats are there?' A__ = vqa_pipeline(image=lowercase_,question=lowercase_,top_k=2 ) self.assertEqual( nested_simplify(lowercase_,decimals=4 ),[{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) A__ = vqa_pipeline({'image': image, 'question': question},top_k=2 ) self.assertEqual( nested_simplify(lowercase_,decimals=4 ),[{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) A__ = vqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}],top_k=2 ) self.assertEqual( nested_simplify(lowercase_,decimals=4 ),[[{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2,) @require_tf @unittest.skip('Visual question answering not implemented in TF' ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' pass
7
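A usage sketch of the pipeline exercised above, using the same checkpoint as the slow test (it must be downloadable):

# Hedged usage sketch for the visual-question-answering pipeline tested above.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # e.g. [{'score': ..., 'answer': '2'}, {'score': ..., 'answer': '1'}]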
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowercase_ = "true" def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) A__ = RegressionModel() A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) A__ = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ): A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): A__ = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) A__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str: '''simple docstring''' A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) A__ = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' A__ = [] for batch in dataloader: A__ , A__ = batch.values() with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A__ , A__ = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]: 
'''simple docstring''' A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str: '''simple docstring''' A__ = evaluate.load('glue' , 'mrpc' ) A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline A__ , A__ , A__ = setup['no'] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] ) A__ = metric.compute() # Then do distributed A__ , A__ , A__ = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) A__ = batch['labels'] A__ , A__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) A__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _snake_case( ) -> Optional[Any]: '''simple docstring''' A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) A__ = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 ) accelerator.state._reset_state() def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' main() if __name__ == "__main__": main()
7
1
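The point of gather_for_metrics in the script above is that it all-gathers results across processes and, when iterating a dataloader prepared by Accelerate, also drops the samples duplicated to pad the last batch. A minimal sketch of the gathering half, intended to run under `accelerate launch`:

# Minimal sketch of the gather_for_metrics pattern tested above.
# On plain tensors it behaves like a gather; the duplicate-dropping applies
# when consuming a dataloader that Accelerate has prepared.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
local_preds = torch.arange(4, device=accelerator.device) + 4 * accelerator.process_index
gathered = accelerator.gather_for_metrics(local_preds)
print(gathered)  # on every process: the concatenation across all processes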
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'open-llama' def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple: '''simple docstring''' A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = intermediate_size A__ = num_hidden_layers A__ = num_attention_heads A__ = hidden_act A__ = initializer_range A__ = rms_norm_eps A__ = use_cache A__ = kwargs.pop( 'use_memorry_efficient_attention',lowercase_ ) A__ = hidden_dropout_prob A__ = attention_dropout_prob A__ = use_stable_embedding A__ = shared_input_output_embedding A__ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,) def snake_case__ ( self : str )-> str: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' F'got {self.rope_scaling}' ) A__ = self.rope_scaling.get('type',lowercase_ ) A__ = self.rope_scaling.get('factor',lowercase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0: raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
7
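A sketch of the rope_scaling contract validated above; it assumes a transformers version that still exposes OpenLlamaConfig at the top level (the model was later moved under the deprecated modules).

# Hedged sketch of the rope_scaling validation shown above.
from transformers import OpenLlamaConfig

config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(config.rope_scaling)
# {"type": "linear", "factor": 0.5} would raise: the factor must be a float > 1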
def interpolation_search(sorted_collection: list, item: int):
    """Search ``item`` in an ascending sorted collection by interpolating the
    probe position between the boundary values. Returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive variant of ``interpolation_search`` over ``[left, right]``."""
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection: list) -> bool:
    """Raise ValueError if the collection is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    debug = 0
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
7
1
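The probe index in both functions comes from linearly interpolating the target between the boundary values. A worked sketch of the first probe on the demo collection:

# Worked example of the interpolation step used above.
sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93]
item, left, right = 66, 0, 7
# first probe: 0 + (66 - 10) * (7 - 0) // (93 - 10) == 4
point = left + ((item - sorted_collection[left]) * (right - left)) // (
    sorted_collection[right] - sorted_collection[left]
)
print(point, sorted_collection[point])  # 4 50 -> item is larger, so search right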
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Return -1 if ``number`` has an odd count of prime factors (with
    multiplicity), else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
7
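A worked sketch of the parity rule above, with a local stand-in for the imported prime_factors helper (names here are illustrative): 24 = 2*2*2*3 has four prime factors with multiplicity, while 30 = 2*3*5 has three.

# Self-contained sketch of the parity computation; prime_factor_parity is a
# hypothetical stand-in for the prime_factors import used above.
def prime_factor_parity(number: int) -> int:
    factors, d = [], 2
    while d * d <= number:
        while number % d == 0:
            factors.append(d)
            number //= d
        d += 1
    if number > 1:
        factors.append(number)
    return -1 if len(factors) % 2 else 1

print(prime_factor_parity(24), prime_factor_parity(30))  # 1 -1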
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
7
1
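A quick sketch of the unknown-args pairing done by parse_unknown_args in the entry point above; the flag names and values are illustrative:

# The comprehension pairs every other token as flag/value and strips dashes.
unknown_args = ["--num_proc", "4", "--cache_dir", "/tmp/cache"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
print(parsed)  # {'num_proc': '4', 'cache_dir': '/tmp/cache'}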
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json" ), } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'deberta-v2' def __init__( self : int,lowercase_ : List[str]=1_2_8_1_0_0,lowercase_ : Union[str, Any]=1_5_3_6,lowercase_ : Any=2_4,lowercase_ : Optional[int]=2_4,lowercase_ : Tuple=6_1_4_4,lowercase_ : Dict="gelu",lowercase_ : str=0.1,lowercase_ : List[Any]=0.1,lowercase_ : int=5_1_2,lowercase_ : Any=0,lowercase_ : Optional[int]=0.02,lowercase_ : List[str]=1E-7,lowercase_ : int=False,lowercase_ : int=-1,lowercase_ : str=0,lowercase_ : Tuple=True,lowercase_ : Dict=None,lowercase_ : int=0,lowercase_ : Tuple="gelu",**lowercase_ : List[Any],)-> Union[str, Any]: '''simple docstring''' super().__init__(**lowercase_ ) A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = initializer_range A__ = relative_attention A__ = max_relative_positions A__ = pad_token_id A__ = position_biased_input # Backwards compatibility if type(lowercase_ ) == str: A__ = [x.strip() for x in pos_att_type.lower().split('|' )] A__ = pos_att_type A__ = vocab_size A__ = layer_norm_eps A__ = kwargs.get('pooler_hidden_size',lowercase_ ) A__ = pooler_dropout A__ = pooler_hidden_act class A ( _UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self : int )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": A__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A__ = {0: 'batch', 1: 'sequence'} if self._config.type_vocab_size > 0: return OrderedDict( [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] ) else: return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] ) @property def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' return 1_2 def snake_case__ ( self : Dict,lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional["TensorType"] = None,lowercase_ : int = 3,lowercase_ : int = 4_0,lowercase_ : int = 4_0,lowercase_ : "PreTrainedTokenizerBase" = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = super().generate_dummy_inputs(preprocessor=lowercase_,framework=lowercase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
7
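A sketch of the pos_att_type backwards-compatibility parsing in the config above: a pipe-separated string is normalized into a list of relative-attention types.

# Grounded in the __init__ shown above; the checkpoint-free construction is cheap.
from transformers import DebertaV2Config

config = DebertaV2Config(pos_att_type="c2p|p2c")
print(config.pos_att_type)  # ['c2p', 'p2c']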
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A : """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' return ViTConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,) def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]: '''simple docstring''' A__ = TFViTModel(config=lowercase_ ) A__ = model(lowercase_,training=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) A__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) ) def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFViTForImageClassification(lowercase_ ) A__ = model(lowercase_,labels=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = TFViTForImageClassification(lowercase_ ) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCamelCase = ( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' A__ = TFViTModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Any )-> int: '''simple docstring''' pass def snake_case__ ( self : str )-> Dict: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) ) def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1],lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: 
'''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(lowercase_ ) def _snake_case( ) -> str: '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase_,return_tensors='tf' ) # forward pass A__ = model(**lowercase_ ) # verify the logits A__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape,lowercase_ ) A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
7
1
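One behavior the tests above exercise is interpolate_pos_encoding, which lets ViT accept images at a resolution other than the pretraining size. A hedged sketch, assuming tensorflow is installed and the checkpoint is downloadable:

# Hedged sketch of interpolate_pos_encoding on TFViTModel.
import tensorflow as tf
from transformers import TFViTModel

model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
pixel_values = tf.random.uniform((1, 3, 112, 112))  # half the pretraining size
outputs = model(pixel_values=pixel_values, interpolate_pos_encoding=True)
print(outputs.last_hidden_state.shape)  # (1, (112 // 16) ** 2 + 1, 768) == (1, 50, 768)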
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowercase_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", 
f"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.encoder.norm.weight", "encoder.layernorm.weight"), ("transformer.encoder.norm.bias", "encoder.layernorm.bias"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ] ) def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[str]: '''simple docstring''' A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> int: '''simple docstring''' A__ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A__ = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) A__ = value else: A__ = value return new_state_dict def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' A__ = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A__ = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A__ = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:256, :] A__ = in_proj_bias[:256] A__ = in_proj_weight[256:512, :] A__ = in_proj_bias[256:512] A__ = in_proj_weight[-256:, :] A__ = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A__ = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A__ = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:256, :] A__ = in_proj_bias[:256] A__ = in_proj_weight[256:512, :] A__ = in_proj_bias[256:512] A__ = in_proj_weight[-256:, :] A__ = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention A__ = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A__ = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict A__ = in_proj_weight_cross_attn[:256, :] A__ = in_proj_bias_cross_attn[:256] A__ = in_proj_weight_cross_attn[256:512, :] A__ = in_proj_bias_cross_attn[256:512] A__ = 
in_proj_weight_cross_attn[-256:, :] A__ = in_proj_bias_cross_attn[-256:] def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ) -> str: '''simple docstring''' A__ , A__ = image.size A__ = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = 800 if 'detection' in checkpoint_url else 1000 A__ = target_max_size / current_max_size A__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> int: '''simple docstring''' A__ = F.to_tensor(SCREAMING_SNAKE_CASE__ ) A__ = F.normalize(SCREAMING_SNAKE_CASE__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: '''simple docstring''' logger.info('Converting model...' ) # load original state dict A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = rename_backbone_keys(SCREAMING_SNAKE_CASE__ ) # query, key and value matrices need special treatment read_in_q_k_v(SCREAMING_SNAKE_CASE__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A__ = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) A__ = val # create HuggingFace model and load state dict A__ = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A__ = 15 A__ = 2 A__ = {0: 'table', 1: 'table rotated'} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} else: A__ = 125 A__ = 6 A__ = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 ) A__ = TableTransformerForObjectDetection(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) model.eval() # verify our conversion A__ = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' A__ = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=SCREAMING_SNAKE_CASE__ ) A__ = Image.open(SCREAMING_SNAKE_CASE__ ).convert('RGB' ) A__ = normalize(resize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ).unsqueeze(0 ) A__ = model(SCREAMING_SNAKE_CASE__ ) if "detection" in checkpoint_url: A__ = (1, 15, 3) A__ = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A__ = (1, 125, 7) A__ = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , 
atol=1E-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) A__ = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(SCREAMING_SNAKE_CASE__ ) image_processor.push_to_hub(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", type=str, choices=[ "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth", "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth", ], help="URL of the Table Transformer checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase_ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
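# --- Illustrative usage sketch (not part of the conversion script above) ---
# A minimal end-to-end run of the converted checkpoint. It assumes the weights were
# pushed to the hub as "microsoft/table-transformer-detection", that a local
# "table.png" exists (both placeholders), and that the installed transformers version
# exposes `post_process_object_detection` on the image processor.
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

image = Image.open("table.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits/boxes to (score, label, box) triples in absolute pixel coordinates.
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())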
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class A : """simple docstring""" def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = vocab_size - 1 def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) A__ = self.get_config() return config, input_ids, input_mask, token_labels def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,) def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = True return config, input_ids, input_mask, token_labels def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any: '''simple docstring''' A__ = GPTNeoXModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple: '''simple 
docstring''' A__ = True A__ = GPTNeoXModel(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]: '''simple docstring''' A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForQuestionAnswering(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForTokenClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = True A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() # first forward pass A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3),config.vocab_size ) A__ = ids_tensor((self.batch_size, 3),vocab_size=2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens],dim=-1 ) A__ = torch.cat([input_mask, next_mask],dim=-1 ) A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ ) A__ = output_from_no_past['hidden_states'][0] A__ = model( lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0] # select random slice A__ = ids_tensor((1,),output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) ) def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = 
config_and_inputs A__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCamelCase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = GPTNeoXModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 ) def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : List[str] )-> Any: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowercase_ ) def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) def snake_case__ ( self : Any )-> List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 1_0],config.vocab_size ) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = 
GPTNeoXModel(lowercase_ ) original_model.to(lowercase_ ) original_model.eval() A__ = original_model(lowercase_ ).last_hidden_state A__ = original_model(lowercase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = {'type': scaling_type, 'factor': 10.0} A__ = GPTNeoXModel(lowercase_ ) scaled_model.to(lowercase_ ) scaled_model.eval() A__ = scaled_model(lowercase_ ).last_hidden_state A__ = scaled_model(lowercase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) else: self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) @require_torch class A ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Tuple )-> Union[str, Any]: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowercase_ ) A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 ) A__ = tokenizer.batch_decode(lowercase_ )[0] self.assertEqual(lowercase_,lowercase_ )
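# --- Illustrative sketch of the RoPE-scaling behaviour exercised by the tests above ---
# A tiny, randomly initialised GPT-NeoX built twice from the same seed, once without and
# once with linear RoPE scaling, to show where the `rope_scaling` dict plugs in. All
# sizes are arbitrary toy values, and this assumes a transformers version that supports
# `rope_scaling` on GPTNeoXConfig (as the parameterized test above requires).
import torch
from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=128, max_position_embeddings=512,
)
torch.manual_seed(42)
original_model = GPTNeoXModel(config).eval()

config.rope_scaling = {"type": "linear", "factor": 10.0}
torch.manual_seed(42)  # same seed, so the two models share their random weights
scaled_model = GPTNeoXModel(config).eval()

input_ids = torch.randint(0, 99, (1, 10))
with torch.no_grad():
    original_out = original_model(input_ids).last_hidden_state
    scaled_out = scaled_model(input_ids).last_hidden_state
# Linear scaling rescales positions even for short inputs, so the outputs should differ.
print(torch.allclose(original_out, scaled_out, atol=1e-5))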
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
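# --- Illustrative sketch: swapping schedulers on a pipeline via `from_config` ---
# The tests above round-trip one scheduler config through the DEIS/DPM/UniPC classes;
# in practice the same mechanism swaps the scheduler of a loaded pipeline. The
# checkpoint name below is only an example, and the download is large.
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# Rebuild the scheduler from the pipeline's existing scheduler config.
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut", num_inference_steps=25).images[0]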
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop(
            'use_memorry_efficient_attention',lowercase_ )
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}' )
        A__ = self.rope_scaling.get('type',lowercase_ )
        A__ = self.rope_scaling.get('factor',lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
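# --- Illustrative sketch of the `rope_scaling` validation above ---
# One valid and one invalid scaling dict; the second should raise a ValueError. This
# assumes the class is exposed as OpenLlamaConfig (in recent transformers releases the
# model lives under a deprecated namespace, so the import path may differ).
from transformers import OpenLlamaConfig

ok = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
try:
    bad = OpenLlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
except ValueError as err:
    print(err)  # `rope_scaling`'s type field must be one of ['linear', 'dynamic'], ...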
import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = "Hello world! cécé herlolip" def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : bool ) -> str: '''simple docstring''' A__ = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) roberta.eval() # disable dropout A__ = roberta.model.encoder.sentence_encoder A__ = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: A__ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , SCREAMING_SNAKE_CASE__ ) A__ = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE__ ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE__ ) model.eval() # Now let's copy all the weights. # Embeddings A__ = roberta_sent_encoder.embed_tokens.weight A__ = roberta_sent_encoder.embed_positions.weight A__ = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
A__ = roberta_sent_encoder.layer_norm.weight A__ = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer A__ = model.roberta.encoder.layer[i] A__ = roberta_sent_encoder.layers[i] A__ = layer.attention A__ = roberta_layer.self_attn_layer_norm.weight A__ = roberta_layer.self_attn_layer_norm.bias # self attention A__ = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) A__ = roberta_layer.self_attn.q_proj.weight A__ = roberta_layer.self_attn.q_proj.bias A__ = roberta_layer.self_attn.k_proj.weight A__ = roberta_layer.self_attn.k_proj.bias A__ = roberta_layer.self_attn.v_proj.weight A__ = roberta_layer.self_attn.v_proj.bias # self-attention output A__ = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape A__ = roberta_layer.self_attn.out_proj.weight A__ = roberta_layer.self_attn.out_proj.bias # this one is final layer norm A__ = roberta_layer.final_layer_norm.weight A__ = roberta_layer.final_layer_norm.bias # intermediate A__ = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape A__ = roberta_layer.fca.weight A__ = roberta_layer.fca.bias # output A__ = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape A__ = roberta_layer.fca.weight A__ = roberta_layer.fca.bias # end of layer if classification_head: A__ = roberta.model.classification_heads['mnli'].dense.weight A__ = roberta.model.classification_heads['mnli'].dense.bias A__ = roberta.model.classification_heads['mnli'].out_proj.weight A__ = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head A__ = roberta.model.encoder.lm_head.dense.weight A__ = roberta.model.encoder.lm_head.dense.bias A__ = roberta.model.encoder.lm_head.layer_norm.weight A__ = roberta.model.encoder.lm_head.layer_norm.bias A__ = roberta.model.encoder.lm_head.weight A__ = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. A__ = roberta.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) # batch of size 1 A__ = model(SCREAMING_SNAKE_CASE__ )[0] if classification_head: A__ = roberta.model.classification_heads['mnli'](roberta.extract_features(SCREAMING_SNAKE_CASE__ ) ) else: A__ = roberta.model(SCREAMING_SNAKE_CASE__ )[0] print(our_output.shape , their_output.shape ) A__ = torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 A__ = torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(SCREAMING_SNAKE_CASE__ ).mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." 
) lowercase_ = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
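# --- Illustrative usage sketch for the conversion script above ---
# A typical invocation (the script name and paths are placeholders), followed by
# loading the converted folder back into transformers:
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl
#
from transformers import XLMRobertaXLForMaskedLM

model = XLMRobertaXLForMaskedLM.from_pretrained("./xlm-roberta-xl")
print(f"{sum(p.numel() for p in model.parameters()) / 1e9:.1f}B parameters")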
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
    '''simple docstring'''
    return EnvironmentCommand()


class A ( _UpperCAmelCase ):
    """simple docstring"""

    @staticmethod
    def snake_case__ ( lowercase_ : ArgumentParser )-> Dict:
        '''simple docstring'''
        A__ = parser.add_parser('env' )
        download_parser.set_defaults(func=lowercase_ )

    def snake_case__ ( self : List[Any] )-> List[str]:
        '''simple docstring'''
        A__ = huggingface_hub.__version__

        A__ = 'not installed'
        A__ = 'NA'
        if is_torch_available():
            import torch

            A__ = torch.__version__
            A__ = torch.cuda.is_available()

        A__ = 'not installed'
        if is_transformers_available():
            import transformers

            A__ = transformers.__version__

        A__ = 'not installed'
        if is_accelerate_available():
            import accelerate

            A__ = accelerate.__version__

        A__ = 'not installed'
        if is_xformers_available():
            import xformers

            A__ = xformers.__version__

        A__ = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(lowercase_ ) )

        return info

    @staticmethod
    def snake_case__ ( lowercase_ : int )-> Optional[Any]:
        '''simple docstring'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
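# --- Illustrative sketch: wiring a command like the one above into a CLI ---
# A stripped-down driver, assuming the class is exposed as EnvironmentCommand in
# diffusers.commands.env, with `register_subcommand` and `run` as the de-obfuscated
# names of the two methods above.
from argparse import ArgumentParser

from diffusers.commands.env import EnvironmentCommand

parser = ArgumentParser("diffusers-cli")
subparsers = parser.add_subparsers()
EnvironmentCommand.register_subcommand(subparsers)  # adds the "env" subcommand

args = parser.parse_args(["env"])
command = args.func(args)  # the factory registered via set_defaults(func=...)
command.run()              # prints the environment report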
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = "▁" lowercase_ = {"vocab_file": "spiece.model"} lowercase_ = { "vocab_file": { "google/reformer-crime-and-punishment": ( "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" ) } } lowercase_ = { "google/reformer-crime-and-punishment": 524288, } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['input_ids', 'attention_mask'] def __init__( self : str,lowercase_ : List[Any],lowercase_ : List[Any]="</s>",lowercase_ : Dict="<unk>",lowercase_ : Optional[Any]=[],lowercase_ : Optional[Dict[str, Any]] = None,**lowercase_ : List[Any],)-> None: '''simple docstring''' A__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=lowercase_,unk_token=lowercase_,additional_special_tokens=lowercase_,sp_model_kwargs=self.sp_model_kwargs,**lowercase_,) A__ = vocab_file A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase_ ) @property def snake_case__ ( self : Optional[int] )-> List[str]: '''simple docstring''' return self.sp_model.get_piece_size() def snake_case__ ( self : Dict )-> Dict[str, int]: '''simple docstring''' A__ = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int )-> Optional[int]: '''simple docstring''' A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : Optional[Any],lowercase_ : str )-> int: '''simple docstring''' A__ = d # for backward compatibility if not hasattr(self,'sp_model_kwargs' ): A__ = {} A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def snake_case__ ( self : List[str],lowercase_ : str )-> List[str]: '''simple docstring''' return self.sp_model.encode(lowercase_,out_type=lowercase_ ) def snake_case__ ( self : Dict,lowercase_ : List[str] )-> Optional[int]: '''simple docstring''' return self.sp_model.piece_to_id(lowercase_ ) def snake_case__ ( self : List[Any],lowercase_ : List[Any] )-> Tuple: '''simple docstring''' if index < self.sp_model.get_piece_size(): A__ = self.sp_model.IdToPiece(lowercase_ ) return token def snake_case__ ( self : List[Any],lowercase_ : Tuple )-> Tuple: '''simple docstring''' A__ = [] A__ = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_ ) + token A__ = [] else: current_sub_tokens.append(lowercase_ ) out_string += self.sp_model.decode(lowercase_ ) return out_string.strip() def snake_case__ ( self : Dict,lowercase_ : str,lowercase_ : Optional[str] = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(lowercase_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A__ = os.path.join( lowercase_,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file,lowercase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase_,'wb' ) as fi: A__ = 
self.sp_model.serialized_model_proto() fi.write(lowercase_ ) return (out_vocab_file,)
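# --- Illustrative sketch: round-tripping text through the tokenizer above ---
# Uses the published Reformer checkpoint; the sample sentence is arbitrary.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("He could hardly breathe.")
print(ids)
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids))  # should reproduce the input, modulo whitespace normalisation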
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ReformerTokenizer lowerCamelCase = ReformerTokenizerFast lowerCamelCase = True lowerCamelCase = False lowerCamelCase = True def snake_case__ ( self : Any )-> str: '''simple docstring''' super().setUp() A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : Optional[int] )-> Optional[int]: '''simple docstring''' A__ = '<s>' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<unk>' ) self.assertEqual(vocab_keys[1],'<s>' ) self.assertEqual(vocab_keys[-1],'j' ) self.assertEqual(len(lowercase_ ),1_0_0_0 ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int,lowercase_ : Optional[int]=1_5 )-> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ ) # Simple input A__ = 'This is a simple input' A__ = ['This is a simple input 1', 'This is a simple input 2'] A__ = ('This is a simple input', 'This is a pair') A__ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) def snake_case__ ( self : 
List[Any] )-> Tuple: '''simple docstring''' pass def snake_case__ ( self : Dict )-> str: '''simple docstring''' A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ),[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2],) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ],) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4],) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ],) @cached_property def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = 'Hello World!' A__ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @slow def snake_case__ ( self : Optional[int] )-> str: '''simple docstring''' A__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) A__ = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def snake_case__ ( self : int )-> Any: '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0] A__ = ' '.join(lowercase_ ) A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' ) A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' ) A__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) A__ = encoded_sequence['input_ids'].shape A__ = ReformerModel(lowercase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def snake_case__ ( self : int )-> Tuple: '''simple docstring''' A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 A__ = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
from torch import nn


class A ( nn.Module ):
    """simple docstring"""

    def __init__( self : str,lowercase_ : List[Any],lowercase_ : List[Any] )-> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        A__ = class_size
        A__ = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        A__ = nn.Linear(lowercase_,lowercase_ )

    def snake_case__ ( self : Union[str, Any],lowercase_ : List[str] )-> List[str]:
        '''simple docstring'''
        A__ = self.mlp(lowercase_ )
        return logits
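# --- Illustrative sketch of the head above, with de-obfuscated names ---
# The class stores class_size/embed_size and maps pooled embeddings to logits through a
# single linear layer; this is an equivalent readable version plus a shape check.
import torch
from torch import nn


class ClassificationHead(nn.Module):
    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        return self.mlp(hidden_state)


head = ClassificationHead(class_size=5, embed_size=1024)
logits = head(torch.randn(2, 1024))
print(logits.shape)  # torch.Size([2, 5])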
def _snake_case( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , ) -> float:
    '''simple docstring'''
    A__ = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        A__ = 1 - (matter_density + radiation_density + dark_energy)

        A__ = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        A__ = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    lowercase_ = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
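# --- Illustrative sketch: the Friedmann relation the function above implements ---
# H(z) = H0 * sqrt(Omega_r (1+z)^4 + Omega_m (1+z)^3 + Omega_k (1+z)^2 + Omega_Lambda),
# with Omega_k = 1 - (Omega_r + Omega_m + Omega_Lambda). Re-computed directly at z = 0
# for the demo parameters above: the bracket sums to 1.0, so H(0) equals H0.
radiation_density, matter_density, dark_energy, redshift = 1e-4, 0.3, 1 - 0.3, 0
curvature = 1 - (matter_density + radiation_density + dark_energy)
e_squared = (
    radiation_density * (redshift + 1) ** 4
    + matter_density * (redshift + 1) ** 3
    + curvature * (redshift + 1) ** 2
    + dark_energy
)
print(68.3 * e_squared**0.5)  # ~68.3 km/s/Mpc, matching the demo call above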
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


lowercase_ = logging.get_logger(__name__)


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = CLIPConfig
    lowerCamelCase = ['CLIPEncoderLayer']

    def __init__( self : int,lowercase_ : CLIPConfig )-> Union[str, Any]:
        '''simple docstring'''
        super().__init__(lowercase_ )

        A__ = CLIPVisionModelWithProjection(config.vision_config )
        A__ = nn.Linear(config.vision_config.projection_dim,1 )
        A__ = nn.Linear(config.vision_config.projection_dim,1 )

    @torch.no_grad()
    def snake_case__ ( self : Optional[int],lowercase_ : List[Any],lowercase_ : Dict,lowercase_ : str=0.5,lowercase_ : List[Any]=0.5 )-> List[str]:
        '''simple docstring'''
        A__ = self.vision_model(lowercase_ )[0]

        A__ = self.p_head(lowercase_ )
        A__ = nsfw_detected.flatten()
        A__ = nsfw_detected > p_threshold
        A__ = nsfw_detected.tolist()

        if any(lowercase_ ):
            logger.warning(
                'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, nsfw_detected_ in enumerate(lowercase_ ):
            if nsfw_detected_:
                A__ = np.zeros(images[idx].shape )

        A__ = self.w_head(lowercase_ )
        A__ = watermark_detected.flatten()
        A__ = watermark_detected > w_threshold
        A__ = watermark_detected.tolist()

        if any(lowercase_ ):
            logger.warning(
                'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
                ' Try again with a different prompt and/or seed.' )

        for idx, watermark_detected_ in enumerate(lowercase_ ):
            if watermark_detected_:
                A__ = np.zeros(images[idx].shape )

        return images, nsfw_detected, watermark_detected
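# --- Illustrative sketch: invoking a safety checker like the one above ---
# The class wraps a CLIP vision tower plus two linear probes (NSFW / watermark) and
# blacks out flagged images. This toy call assumes the class is exposed as
# IFSafetyChecker in diffusers.pipelines.deepfloyd_if.safety_checker with the call
# signature shown above (clip_input, images, p_threshold=0.5, w_threshold=0.5); the
# random inputs stand in for real preprocessed images.
import numpy as np
import torch
from transformers import CLIPConfig

from diffusers.pipelines.deepfloyd_if.safety_checker import IFSafetyChecker

checker = IFSafetyChecker(CLIPConfig())  # randomly initialised, for shape-checking only
images = [np.random.rand(64, 64, 3).astype(np.float32)]
clip_input = torch.randn(1, 3, 224, 224)
images, nsfw_detected, watermark_detected = checker(clip_input, images)
print(nsfw_detected, watermark_detected)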
from typing import Union

import fire
import torch
from tqdm import tqdm


def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "cpu" , SCREAMING_SNAKE_CASE__ : Union[str, None] = None ) -> None:
    '''simple docstring'''
    A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        A__ = v.half()
    if save_path is None:  # overwrite src_path
        A__ = src_path
    torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    fire.Fire(convert)
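# --- Illustrative usage sketch for the fp16 converter above ---
# Typical CLI calls (the script and file names are placeholders):
#
#   python convert_to_fp16.py pytorch_model.bin                      # converts in place
#   python convert_to_fp16.py pytorch_model.bin cpu model_fp16.bin   # writes a copy
#
# Equivalent in-process demo on a synthetic state dict:
import torch

state_dict = {"weight": torch.randn(4, 4)}
torch.save(state_dict, "demo.bin")
fp16 = {k: v.half() for k, v in torch.load("demo.bin", map_location="cpu").items()}
torch.save(fp16, "demo_fp16.bin")
print(fp16["weight"].dtype)  # torch.float16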
import math

import tensorflow as tf
from packaging import version


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> int:
    '''simple docstring'''
    A__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
    A__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))

    return x * cdf


def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
    '''simple docstring'''
    A__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
    A__ = tf.cast(math.pi , x.dtype )
    A__ = tf.cast(0.04_4715 , x.dtype )
    A__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(SCREAMING_SNAKE_CASE__ , 3 )) ))

    return x * cdf


def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]:
    '''simple docstring'''
    A__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )

    return x * tf.tanh(tf.math.softplus(SCREAMING_SNAKE_CASE__ ) )


def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> int:
    '''simple docstring'''
    A__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
    A__ = tf.cast(0.04_4715 , x.dtype )
    A__ = tf.cast(0.79_7884_5608 , x.dtype )

    return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))


def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
    '''simple docstring'''
    A__ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
    A__ = tf.cast(1.702 , x.dtype )

    return x * tf.math.sigmoid(coeff * x )


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
    '''simple docstring'''
    return tf.clip_by_value(_gelu(SCREAMING_SNAKE_CASE__ ) , -10 , 10 )


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple=-1 ) -> List[Any]:
    '''simple docstring'''
    A__ , A__ = tf.split(SCREAMING_SNAKE_CASE__ , 2 , axis=SCREAMING_SNAKE_CASE__ )

    return a * tf.math.sigmoid(SCREAMING_SNAKE_CASE__ )


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
        '''simple docstring'''
        return tf.keras.activations.gelu(SCREAMING_SNAKE_CASE__ , approximate=SCREAMING_SNAKE_CASE__ )

    lowercase_ = tf.keras.activations.gelu
    lowercase_ = approximate_gelu_wrap
else:
    lowercase_ = _gelu
    lowercase_ = _gelu_new


lowercase_ = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
    '''simple docstring'''
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
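# --- Illustrative sketch: resolving activations by name via the mapping above ---
# The de-obfuscated lookup helper is exported from transformers as `get_tf_activation`;
# a quick check that "gelu" and the clipped "gelu_10" variant behave as expected.
import tensorflow as tf
from transformers.activations_tf import get_tf_activation

x = tf.constant([-20.0, -1.0, 0.0, 1.0, 20.0])
gelu = get_tf_activation("gelu")
gelu_10 = get_tf_activation("gelu_10")
print(gelu(x).numpy())     # unbounded above
print(gelu_10(x).numpy())  # same values, clipped to [-10, 10]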
import os

# Precomputes a list of the 100 first triangular numbers
lowercase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def _snake_case( ) -> int:
    '''simple docstring'''
    A__ = os.path.dirname(os.path.realpath(__file__ ) )
    A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'words.txt' )

    A__ = ''
    with open(SCREAMING_SNAKE_CASE__ ) as f:
        A__ = f.readline()

    A__ = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    A__ = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    print(solution())
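# --- Illustrative sketch: the word-value computation used above ---
# "SKY" scores 19 + 11 + 25 = 55 = t_10, so it counts as a triangular word.
word = "SKY"
score = sum(ord(ch) - 64 for ch in word)  # A=1, B=2, ...
triangular = [n * (n + 1) // 2 for n in range(1, 101)]
print(score, score in triangular)  # 55 True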
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class A : """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : int,lowercase_ : List[Any]=3,lowercase_ : Dict=3_2,lowercase_ : List[str]=3,lowercase_ : List[str]=1_0,lowercase_ : List[Any]=[8, 1_6, 3_2, 6_4],lowercase_ : Any=[1, 1, 2, 1],lowercase_ : Any=True,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]="relu",lowercase_ : int=3,lowercase_ : Tuple=None,lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"],lowercase_ : Tuple=[2, 3, 4],lowercase_ : List[Any]=1,)-> int: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = num_channels A__ = embeddings_size A__ = hidden_sizes A__ = depths A__ = is_training A__ = use_labels A__ = hidden_act A__ = num_labels A__ = scope A__ = len(lowercase_ ) A__ = out_features A__ = out_indices A__ = num_groups def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.num_labels ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : List[str] )-> Tuple: '''simple docstring''' return BitConfig( num_channels=self.num_channels,embeddings_size=self.embeddings_size,hidden_sizes=self.hidden_sizes,depths=self.depths,hidden_act=self.hidden_act,num_labels=self.num_labels,out_features=self.out_features,out_indices=self.out_indices,num_groups=self.num_groups,) def snake_case__ ( self : Dict,lowercase_ : int,lowercase_ : Optional[int],lowercase_ : Optional[Any] )-> Dict: '''simple docstring''' A__ = BitModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_ ) self.parent.assertEqual( result.last_hidden_state.shape,(self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2),) def snake_case__ ( self : Union[str, Any],lowercase_ : List[Any],lowercase_ : Tuple,lowercase_ : Tuple )-> Optional[int]: '''simple docstring''' A__ = self.num_labels A__ = BitForImageClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : int,lowercase_ : str,lowercase_ : Dict,lowercase_ : List[str] )-> Union[str, Any]: '''simple docstring''' A__ = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ),len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ),len(config.out_features ) ) 
self.parent.assertListEqual(model.channels,config.hidden_sizes[1:] ) # verify backbone works with out_features=None A__ = None A__ = BitBackbone(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ),1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ),[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ),1 ) self.parent.assertListEqual(model.channels,[config.hidden_sizes[-1]] ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () lowerCamelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' A__ = BitModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_ ) def snake_case__ ( self : Dict )-> Tuple: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' return @unittest.skip(reason='Bit does not output attentions' ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def snake_case__ ( self : Dict )-> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: '''simple docstring''' pass def snake_case__ ( self : List[str] )-> int: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1],lowercase_ ) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_ ) def snake_case__ ( self : Any )-> Any: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(config=lowercase_ ) for name, module in model.named_modules(): if isinstance(lowercase_,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 
),msg=F'Parameter {name} of model {model_class} seems not properly initialized',) self.assertTrue( torch.all(module.bias == 0 ),msg=F'Parameter {name} of model {model_class} seems not properly initialized',) def snake_case__ ( self : Dict )-> Tuple: '''simple docstring''' def check_hidden_states_output(lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : Dict ): A__ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(lowercase_,lowercase_ ) ) A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A__ = self.model_tester.num_stages self.assertEqual(len(lowercase_ ),expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ),[self.model_tester.image_size // 4, self.model_tester.image_size // 4],) A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A__ = layer_type A__ = True check_hidden_states_output(lowercase_,lowercase_,lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(lowercase_,lowercase_,lowercase_ ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def snake_case__ ( self : int )-> str: '''simple docstring''' pass def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case__ ( self : str )-> str: '''simple docstring''' for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = BitModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def _snake_case( ) -> str: '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def snake_case__ ( self : Optional[int] )-> Dict: '''simple docstring''' A__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase_,return_tensors='pt' ).to(lowercase_ ) # forward pass with torch.no_grad(): A__ = model(**lowercase_ ) # verify the logits A__ = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape,lowercase_ ) A__ = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3],lowercase_,atol=1E-4 ) ) @require_torch class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (BitBackbone,) if is_torch_available() else () lowerCamelCase = BitConfig lowerCamelCase = False def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' A__ = BitModelTester(self )
7
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionAttendAndExcitePipeline lowerCamelCase = False lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def snake_case__ ( cls : Any )-> Optional[Any]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Dict: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,) A__ = DDIMScheduler( beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = A__ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 6_4, 6_4, 3) ) A__ = np.array( 
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def snake_case__ ( self : str )-> int: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Any )-> Optional[int]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : int )-> List[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = torch.manual_seed(5_1 ) A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa ) pipe.to('cuda' ) A__ = 'a painting of an elephant with glasses' A__ = [5, 7] A__ = pipe( prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0] A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
7
1
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        # mark v as visited, assign it color c, and push neighbors into color 1 - c
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge joins two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
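# Illustrative sanity check for the routine above (these graphs are not part of
# the original file): a graph with an odd cycle can never be two-colored, while
# an even cycle always can.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # expected: False (odd cycle)

square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}
print(check_bipartite_dfs(square))  # expected: True (even cycle)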
7
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL lowercase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : tuple , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , ) -> Union[str, Any]: '''simple docstring''' output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , use_external_data_format=SCREAMING_SNAKE_CASE__ , enable_onnx_checker=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) else: export( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , ) @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool = False ) -> Tuple: '''simple docstring''' A__ = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): A__ = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: A__ = 'cpu' A__ = Path(SCREAMING_SNAKE_CASE__ ) # VAE DECODER A__ = AutoencoderKL.from_pretrained(model_path + '/vae' ) A__ = vae_decoder.config.latent_channels # forward only through the decoder part A__ = vae_decoder.decode onnx_export( SCREAMING_SNAKE_CASE__ , model_args=( torch.randn(1 , SCREAMING_SNAKE_CASE__ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=SCREAMING_SNAKE_CASE__ , ) del vae_decoder if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") lowercase_ = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
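# Illustrative sketch (not part of the original script): the converter can also
# be driven directly from Python. Per the argparse block, the argument order is
# (model_path, output_path, opset, fp16); the paths below are placeholders, and
# float16 export requires a CUDA device per the check in convert_models.
convert_models("./stable-diffusion-v1-4", "./vae-onnx-out", 14, False)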
7
1
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ) -> Tuple: '''simple docstring''' if attention_mask is None: A__ = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: A__ = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: A__ = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if decoder_head_mask is None: A__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) if cross_attn_head_mask is None: A__ = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=SCREAMING_SNAKE_CASE__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class A : """simple docstring""" def __init__( self : int,lowercase_ : List[Any],lowercase_ : List[Any]=1_3,lowercase_ : Any=7,lowercase_ : Union[str, Any]=True,lowercase_ : Union[str, Any]=False,lowercase_ : List[Any]=9_9,lowercase_ : int=1_6,lowercase_ : str=2,lowercase_ : int=4,lowercase_ : Union[str, Any]=4,lowercase_ : str="relu",lowercase_ : int=0.1,lowercase_ : Union[str, Any]=0.1,lowercase_ : Any=0.0,lowercase_ : List[Any]=0.0,lowercase_ : int=2_0,lowercase_ : Optional[int]=2,lowercase_ : List[Any]=1,lowercase_ : int=0,)-> str: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = max_position_embeddings A__ = eos_token_id A__ = pad_token_id A__ = bos_token_id def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = self.eos_token_id # Eos Token A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # 
position_ids being off by num_pad_tokens in past input A__ = input_ids.clamp(self.pad_token_id + 1 ) A__ = decoder_input_ids.clamp(self.pad_token_id + 1 ) A__ = self.get_config() A__ = prepare_mam_aaa_inputs_dict(lowercase_,lowercase_,lowercase_ ) return config, inputs_dict def snake_case__ ( self : int )-> Tuple: '''simple docstring''' return MaMaaaConfig( vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,encoder_layerdrop=self.encoder_layerdrop,decoder_layerdrop=self.decoder_layerdrop,max_position_embeddings=self.max_position_embeddings,eos_token_id=self.eos_token_id,bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ , A__ = self.prepare_config_and_inputs() return config, inputs_dict def snake_case__ ( self : str,lowercase_ : List[Any],lowercase_ : Optional[Any] )-> List[Any]: '''simple docstring''' A__ = MaMaaaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval() A__ = inputs_dict['input_ids'] A__ = inputs_dict['attention_mask'] A__ = inputs_dict['head_mask'] # first forward pass A__ = model(lowercase_,attention_mask=lowercase_,head_mask=lowercase_,use_cache=lowercase_ ) A__ , A__ = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3),config.vocab_size ) A__ = ids_tensor((self.batch_size, 3),2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens],dim=-1 ) A__ = torch.cat([attention_mask, next_attn_mask],dim=-1 ) A__ = model(lowercase_,attention_mask=lowercase_ )['last_hidden_state'] A__ = model(lowercase_,attention_mask=lowercase_,past_key_values=lowercase_ )[ 'last_hidden_state' ] # select random slice A__ = ids_tensor((1,),output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-2 ) ) def snake_case__ ( self : Tuple,lowercase_ : List[str],lowercase_ : str )-> Tuple: '''simple docstring''' A__ = MaMaaaModel(config=lowercase_ ).to(lowercase_ ).eval() A__ = model(**lowercase_ ) A__ = outputs.encoder_last_hidden_state A__ = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: A__ = model.get_encoder() encoder.save_pretrained(lowercase_ ) A__ = MaMaaaEncoder.from_pretrained(lowercase_ ).to(lowercase_ ) A__ = encoder(inputs_dict['input_ids'],attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: A__ = model.get_decoder() decoder.save_pretrained(lowercase_ ) A__ = MaMaaaDecoder.from_pretrained(lowercase_ ).to(lowercase_ ) A__ = decoder( input_ids=inputs_dict['decoder_input_ids'],attention_mask=inputs_dict['decoder_attention_mask'],encoder_hidden_states=lowercase_,encoder_attention_mask=inputs_dict['attention_mask'],)[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) 
@require_torch class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) lowerCamelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else () lowerCamelCase = ( { 'conversational': MaMaaaForConditionalGeneration, 'feature-extraction': MaMaaaModel, 'summarization': MaMaaaForConditionalGeneration, 'text2text-generation': MaMaaaForConditionalGeneration, 'translation': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) lowerCamelCase = True lowerCamelCase = True lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : Dict,lowercase_ : Union[str, Any],lowercase_ : str,lowercase_ : List[str],lowercase_ : List[str],lowercase_ : int )-> Optional[int]: '''simple docstring''' if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = MaMaaaModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_ ) def snake_case__ ( self : str )-> int: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) A__ , A__ = model_class.from_pretrained(lowercase_,output_loading_info=lowercase_ ) self.assertEqual(info['missing_keys'],[] ) def snake_case__ ( self : int )-> Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase_ ) def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): A__ = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = copy.deepcopy(self._prepare_for_class(lowercase_,lowercase_ ) ) if not self.is_encoder_decoder: A__ = inputs['input_ids'] del inputs["input_ids"] else: A__ = inputs['input_ids'] A__ = inputs.get('decoder_input_ids',lowercase_ ) del inputs["input_ids"] inputs.pop('decoder_input_ids',lowercase_ ) A__ = model.get_input_embeddings() if not self.is_encoder_decoder: A__ = wte(lowercase_ ) else: A__ = wte(lowercase_ ) A__ = wte(lowercase_ ) with torch.no_grad(): model(**lowercase_ )[0] def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = input_dict['input_ids'] A__ = input_ids.ne(1 ).to(lowercase_ ) A__ = MaMaaaForConditionalGeneration(lowercase_ ).eval().to(lowercase_ ) if torch_device == "cuda": model.half() model.generate(lowercase_,attention_mask=lowercase_ ) model.generate(num_beams=4,do_sample=lowercase_,early_stopping=lowercase_,num_return_sequences=3 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' return 
torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) lowercase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : Any )-> Any: '''simple docstring''' return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' A__ = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(lowercase_ ) A__ = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) A__ = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) A__ = prepare_mam_aaa_inputs_dict(model.config,lowercase_,lowercase_ ) with torch.no_grad(): A__ = model(**lowercase_ )[0] A__ = torch.Size((1, 1_1, 1_0_2_4) ) self.assertEqual(output.shape,lowercase_ ) # change to expected output here A__ = torch.tensor( [[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]],device=lowercase_ ) self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=lowercase_ ) ) def snake_case__ ( self : Optional[int] )-> Optional[Any]: '''simple docstring''' A__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(lowercase_ ) # change to intended input A__ = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) A__ = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) A__ = prepare_mam_aaa_inputs_dict(model.config,lowercase_,lowercase_ ) with torch.no_grad(): A__ = model(**lowercase_ )[0] A__ = torch.Size((1, 1_1, model.config.vocab_size) ) self.assertEqual(output.shape,lowercase_ ) # change to expected output here A__ = torch.tensor( [[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]],device=lowercase_ ) self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=lowercase_ ) ) def snake_case__ ( self : List[Any] )-> List[Any]: '''simple docstring''' A__ = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(lowercase_ ) A__ = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M',src_lang='fr',tgt_lang='en' ) A__ = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams A__ = tokenizer(lowercase_,padding=lowercase_,return_tensors='pt' ) A__ = model.generate( input_ids=dct['input_ids'].to(lowercase_ ),attention_mask=dct['attention_mask'].to(lowercase_ ),num_beams=5,forced_bos_token_id=tokenizer.get_lang_id('en' ),) A__ = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. 
surveillance on all' ' communications in France.', ] A__ = tokenizer.batch_decode( hypotheses_batch.tolist(),clean_up_tokenization_spaces=lowercase_,skip_special_tokens=lowercase_ ) assert generated == expected_en
7
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
7
1
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
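# Migration sketch (illustrative, not part of the file above): code still on the
# deprecated feature extractor can switch to the image processor directly. The
# checkpoint name and image path are placeholders.
from transformers import BeitImageProcessor
from PIL import Image

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("example.jpg")
inputs = processor(images=image, return_tensors="pt")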
7
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(items, max_cost, key_func):
    # take items greedily, best first according to key_func, while they still fit
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy() -> None:
    pass  # doctest examples were elided in the source


if __name__ == "__main__":
    import doctest

    doctest.testmod()
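# Illustrative usage of the helpers above (the menu data is invented): build a
# menu, then greedily fill a weight budget by value-to-weight ratio.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 30]

menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen, total_value)  # the best-ratio items that fit under weight 60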
7
1
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta modulo 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
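# Illustrative convergence check (not part of the original file): the truncation
# error against math.sin shrinks rapidly as the accuracy parameter grows.
import math

for acc in (1, 2, 5, 10):
    approx = maclaurin_sin(1.0, accuracy=acc)
    print(acc, abs(approx - math.sin(1.0)))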
7
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class A ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'resnet' lowerCamelCase = ['basic', 'bottleneck'] def __init__( self : Optional[Any],lowercase_ : int=3,lowercase_ : List[str]=6_4,lowercase_ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8],lowercase_ : Tuple=[3, 4, 6, 3],lowercase_ : Union[str, Any]="bottleneck",lowercase_ : List[str]="relu",lowercase_ : Tuple=False,lowercase_ : List[str]=None,lowercase_ : List[Any]=None,**lowercase_ : str,)-> Optional[Any]: '''simple docstring''' super().__init__(**lowercase_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) A__ = num_channels A__ = embedding_size A__ = hidden_sizes A__ = depths A__ = layer_type A__ = hidden_act A__ = downsample_in_first_stage A__ = ['stem'] + [F'stage{idx}' for idx in range(1,len(lowercase_ ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=lowercase_,out_indices=lowercase_,stage_names=self.stage_names ) class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = version.parse('1.11' ) @property def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def snake_case__ ( self : Any )-> float: '''simple docstring''' return 1E-3
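# Illustrative sketch (not part of the file above), assuming the standard
# transformers API for this config class: build a small bottleneck ResNet
# backbone with the same defaults as the constructor above.
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig(
    num_channels=3,
    embedding_size=64,
    hidden_sizes=[256, 512, 1024, 2048],
    depths=[3, 4, 6, 3],
    layer_type="bottleneck",
    out_features=["stage4"],
)
model = ResNetModel(config)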
7
1
from manim import * class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ = Rectangle(height=0.5,width=0.5 ) A__ = Rectangle(height=0.46,width=0.46 ).set_stroke(width=0 ) A__ = [mem.copy() for i in range(6 )] A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = VGroup(lowercase_,lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('CPU',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase_ ) A__ = [mem.copy() for i in range(4 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('GPU',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase_ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('Model',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,buff=0.5,aligned_edge=lowercase_ ) model.move_to([3, -1.0, 0] ) self.add(lowercase_ ) A__ = [] for i, rect in enumerate(lowercase_ ): rect.set_stroke(lowercase_ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) A__ = Rectangle(height=0.46 / 4,width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_,opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ),buff=0.02,direction=lowercase_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0],direction=lowercase_,buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1],direction=lowercase_,buff=0.0 ) self.add(lowercase_ ) cpu_targs.append(lowercase_ ) A__ = [mem.copy() for i in range(6 )] A__ = VGroup(*lowercase_ ).arrange(lowercase_,buff=0 ) A__ = Text('Loaded Checkpoint',font_size=2_4 ) A__ = Group(lowercase_,lowercase_ ).arrange(lowercase_,aligned_edge=lowercase_,buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) A__ = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A__ = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model',font_size=1_8,) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase_,lowercase_ ) A__ = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint',font_size=1_8,) blue_text.next_to(lowercase_,DOWN * 2.4,aligned_edge=key_text.get_left() ) A__ = MarkupText( F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',font_size=2_4,) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase_ ),Write(lowercase_ ) ) self.play(Write(lowercase_,run_time=1 ),Create(lowercase_,run_time=1 ) ) A__ = [] A__ = [] for i, rect in enumerate(lowercase_ ): A__ = fill.copy().set_fill(lowercase_,opacity=0.7 ) target.move_to(lowercase_ ) first_animations.append(GrowFromCenter(lowercase_,run_time=1 ) ) A__ = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase_,run_time=1.5 ) ) self.play(*lowercase_ ) self.play(*lowercase_ ) self.wait()
7
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json", "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json", "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json", } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 't5' lowerCamelCase = ['past_key_values'] lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self : Union[str, Any],lowercase_ : int=3_2_1_2_8,lowercase_ : int=5_1_2,lowercase_ : List[str]=6_4,lowercase_ : Tuple=2_0_4_8,lowercase_ : Any=6,lowercase_ : List[str]=None,lowercase_ : Union[str, Any]=8,lowercase_ : int=3_2,lowercase_ : Dict=1_2_8,lowercase_ : Optional[int]=0.1,lowercase_ : List[str]=1E-6,lowercase_ : Tuple=1.0,lowercase_ : Any="relu",lowercase_ : Union[str, Any]=True,lowercase_ : Optional[Any]=True,lowercase_ : int=0,lowercase_ : str=1,**lowercase_ : str,)-> Any: '''simple docstring''' A__ = vocab_size A__ = d_model A__ = d_kv A__ = d_ff A__ = num_layers A__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry A__ = num_heads A__ = relative_attention_num_buckets A__ = relative_attention_max_distance A__ = dropout_rate A__ = layer_norm_epsilon A__ = initializer_factor A__ = feed_forward_proj A__ = use_cache A__ = self.feed_forward_proj.split('-' ) A__ = act_info[-1] A__ = act_info[0] == 'gated' if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": A__ = 'gelu_new' super().__init__( pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,) class A ( _UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self : Tuple )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' A__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: A__ = 'past_encoder_sequence + sequence' A__ = {0: 'batch'} A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: A__ = {0: 'batch', 1: 'decoder_sequence'} A__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_,direction='inputs' ) return common_inputs @property def snake_case__ ( self : Any )-> int: '''simple docstring''' return 1_3
7
1
import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available from . import BaseDiffusersCLICommand def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return EnvironmentCommand() class A ( _UpperCAmelCase ): """simple docstring""" @staticmethod def snake_case__ ( lowercase_ : ArgumentParser )-> Dict: '''simple docstring''' A__ = parser.add_parser('env' ) download_parser.set_defaults(func=lowercase_ ) def snake_case__ ( self : List[Any] )-> List[str]: '''simple docstring''' A__ = huggingface_hub.__version__ A__ = 'not installed' A__ = 'NA' if is_torch_available(): import torch A__ = torch.__version__ A__ = torch.cuda.is_available() A__ = 'not installed' if is_transformers_available(): import transformers A__ = transformers.__version__ A__ = 'not installed' if is_accelerate_available(): import accelerate A__ = accelerate.__version__ A__ = 'not installed' if is_xformers_available(): import xformers A__ = xformers.__version__ A__ = { '`diffusers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})', 'Huggingface_hub version': hub_version, 'Transformers version': transformers_version, 'Accelerate version': accelerate_version, 'xFormers version': xformers_version, 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(lowercase_ ) ) return info @staticmethod def snake_case__ ( lowercase_ : int )-> Optional[Any]: '''simple docstring''' return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
7
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_  # memoize the best value for capacity j using the first i items
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # walk the dp table backwards: if the value changed at row i, item i was taken
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
7
1
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
7
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
7
1
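# Usage sketch for the tokenizer exercised by the tests above (downloads the
# "albert-base-v2" checkpoint from the Hub, so it needs network access):
from transformers import AlbertTokenizer


def demo_albert_tokenizer() -> None:
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    ids = tokenizer.encode("this is a test")
    # encode() wraps the text with [CLS] ... [SEP]; strip them when decoding
    assert tokenizer.decode(ids, skip_special_tokens=True) == "this is a test"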
def solution(limit: int = 1000000) -> int:
    """
    Returns the sum of Euler's totient phi(n) for 2 <= n <= limit, using a
    sieve: whenever phi[i] is still i - 1, i is prime, so every multiple j
    of i loses the 1/i fraction of its candidates.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
7
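# Cross-check sketch for the sieve above: for a small limit the result must
# match a brute-force Euler totient built from math.gcd.
from math import gcd


def phi_brute_force(n: int) -> int:
    # count the k in [1, n] that are coprime to n
    return sum(1 for k in range(1, n + 1) if gcd(n, k) == 1)


def check_totient_sieve() -> None:
    assert solution(100) == sum(phi_brute_force(n) for n in range(2, 101))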
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
7
1
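# Usage sketch for the pipeline defined above (the checkpoint name is just an
# example and is downloaded from the Hub on first use):
from transformers import pipeline


def demo_feature_extraction() -> None:
    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("This is a test")
    # `features` is a nested list shaped [batch, sequence_length, hidden_size]
    assert len(features[0][0]) == 768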
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns n together with all of its left and right truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """
    Cheap pre-filter: for numbers above 1000, the first three digits and the
    last three digits must themselves be prime.
    """
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes (starting the search at 13)."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
7
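# Worked example for the helpers above: 3797 is the classic truncatable
# prime -- 3797, 797, 97, 7 (from the left) and 3797, 379, 37, 3 (from the
# right) are all prime.
def check_truncatable_example() -> None:
    assert validate(3797)
    assert all(is_prime(n) for n in list_truncated_nums(3797))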
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Counts set bits by clearing the lowest set bit until nothing is left."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Counts set bits by testing the low bit and shifting right."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code for comparing the two functions with different int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
7
1
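# Property-check sketch: both counters above must agree with Python's own bit
# counting for every small non-negative integer.
def check_popcount_implementations() -> None:
    for n in range(512):
        expected = bin(n).count("1")
        assert get_set_bits_count_using_brian_kernighans_algorithm(n) == expected
        assert get_set_bits_count_using_modulo_operator(n) == expected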
def interpolation_search(sorted_collection, item):
    """Searches an ascending sorted collection by interpolation; returns the index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive variant; `left` and `right` track the current search bounds."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raises ValueError unless the collection is ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
7
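# Usage sketch for the iterative search above (the list must be sorted
# ascending; interpolation works best on roughly uniformly spaced keys):
def check_interpolation_search() -> None:
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(data, 66) == 5
    assert interpolation_search(data, 67) is None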
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' A__ = 384 A__ = 7 if "tiny" in model_name: A__ = 96 A__ = (2, 2, 6, 2) A__ = (3, 6, 12, 24) elif "small" in model_name: A__ = 96 A__ = (2, 2, 18, 2) A__ = (3, 6, 12, 24) elif "base" in model_name: A__ = 128 A__ = (2, 2, 18, 2) A__ = (4, 8, 16, 32) A__ = 12 A__ = 512 elif "large" in model_name: A__ = 192 A__ = (2, 2, 18, 2) A__ = (6, 12, 24, 48) A__ = 12 A__ = 768 # set label information A__ = 150 A__ = 'huggingface/label-files' A__ = 'ade20k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = {v: k for k, v in idalabel.items()} A__ = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) A__ = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , ) return config def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: '''simple docstring''' A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 ) A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 ) A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(4 , in_channel // 4 ) A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(in_channel // 4 , 4 ) A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, 
Any]: '''simple docstring''' A__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } A__ = model_name_to_url[model_name] A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ ) A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "bn" in key: A__ = key.replace('bn' , 'batch_norm' ) A__ = val # rename keys A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ ) if "norm" in key: A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify on image A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' ) A__ = SegformerImageProcessor() A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
7
1
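# Example invocation of the conversion script above; the filename is an
# assumption, but the flags come straight from its argparse definition:
#
#   python convert_upernet_swin_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny \
#       --push_to_hub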
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX-Algorithm for min Vertex Cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
7
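# Sanity-check sketch: every edge must keep at least one endpoint inside the
# cover returned by the heuristic above. The edge list is snapshotted first
# because the function mutates the adjacency lists it is given.
def check_vertex_cover() -> None:
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    edges = [(u, v) for u, nbrs in graph.items() for v in nbrs]  # snapshot first
    cover = greedy_min_vertex_cover(graph)
    assert all(u in cover or v in cover for u, v in edges)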
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowercase_ = "true" def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) A__ = RegressionModel() A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) A__ = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ): A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): A__ = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) A__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str: '''simple docstring''' A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) A__ = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' A__ = [] for batch in dataloader: A__ , A__ = batch.values() with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A__ , A__ = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]: 
'''simple docstring''' A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str: '''simple docstring''' A__ = evaluate.load('glue' , 'mrpc' ) A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline A__ , A__ , A__ = setup['no'] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] ) A__ = metric.compute() # Then do distributed A__ , A__ , A__ = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) A__ = batch['labels'] A__ , A__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) A__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _snake_case( ) -> Optional[Any]: '''simple docstring''' A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) A__ = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 ) accelerator.state._reset_state() def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' main() if __name__ == "__main__": main()
7
1
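# Condensed sketch of the evaluation loop the tests above exercise. It only
# performs real cross-process gathering under a multi-process
# `accelerate launch` run, and model/dataloader/metric are placeholders:
#
# import torch
# from accelerate import Accelerator
#
# accelerator = Accelerator()
# model, dataloader = accelerator.prepare(model, dataloader)
# for batch in dataloader:
#     with torch.inference_mode():
#         logits = model(**batch).logits
#     # gather_for_metrics drops the duplicate samples added to pad the
#     # final uneven batch across processes
#     preds, refs = accelerator.gather_for_metrics((logits.argmax(dim=-1), batch["labels"]))
#     metric.add_batch(predictions=preds, references=refs)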
def get_1s_count(number: int) -> int:
    """Counts the number of set bits in a 32 bit integer using Brian Kernighan's way."""
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")

    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
7
from typing import List from .keymap import KEYMAP, get_character def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' def decorator(SCREAMING_SNAKE_CASE__ : Any ): A__ = getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] ) handle += [key] setattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , SCREAMING_SNAKE_CASE__ ) return func return decorator def _snake_case( *SCREAMING_SNAKE_CASE__ : List[str] ) -> Union[str, Any]: '''simple docstring''' def decorator(SCREAMING_SNAKE_CASE__ : List[str] ): A__ = getattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , [] ) handle += keys setattr(SCREAMING_SNAKE_CASE__ , 'handle_key' , SCREAMING_SNAKE_CASE__ ) return func return decorator class A ( _UpperCAmelCase ): """simple docstring""" def __new__( cls : List[Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple: '''simple docstring''' A__ = super().__new__(cls,lowercase_,lowercase_,lowercase_ ) if not hasattr(lowercase_,'key_handler' ): setattr(lowercase_,'key_handler',{} ) setattr(lowercase_,'handle_input',KeyHandler.handle_input ) for value in attrs.values(): A__ = getattr(lowercase_,'handle_key',[] ) for key in handled_keys: A__ = value return new_cls @staticmethod def snake_case__ ( cls : str )-> Optional[Any]: '''simple docstring''' A__ = get_character() if char != KEYMAP["undefined"]: A__ = ord(lowercase_ ) A__ = cls.key_handler.get(lowercase_ ) if handler: A__ = char return handler(cls ) else: return None def _snake_case( cls : Optional[Any] ) -> Dict: '''simple docstring''' return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
7
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
7
1
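# Once the package is installed, the entry point above is exposed as the
# `datasets-cli` executable; for example (the `test` flags are an assumption
# based on common usage, not taken from this file):
#
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs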
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase_ = { "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
7
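# Simplified sketch of the lazy-import pattern behind `_LazyModule` above (an
# illustration, not the actual transformers implementation): attribute access
# triggers the submodule import, so importing the package itself stays cheap
# even when heavy optional backends are installed.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name: str):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(module, name)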
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A : """simple docstring""" def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict: '''simple docstring''' A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' return ViTConfig( image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,) def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]: '''simple docstring''' A__ = TFViTModel(config=lowercase_ ) A__ = model(lowercase_,training=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) A__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) ) def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict: '''simple docstring''' A__ = self.type_sequence_label_size A__ = TFViTForImageClassification(lowercase_ ) A__ = model(lowercase_,labels=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. A__ = self.image_size // 2 A__ = pixel_values[:, :, :image_size, :image_size] A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = TFViTForImageClassification(lowercase_ ) A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCamelCase = ( {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' A__ = TFViTModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' pass @unittest.skip(reason='ViT does not use inputs_embeds' ) def snake_case__ ( self : Any )-> int: '''simple docstring''' pass def snake_case__ ( self : str )-> Dict: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) ) def snake_case__ ( self : int )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(lowercase_ ) A__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ['pixel_values'] self.assertListEqual(arg_names[:1],lowercase_ ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: 
'''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_ ) @slow def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(lowercase_ ) def _snake_case( ) -> str: '''simple docstring''' A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class A ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=lowercase_,return_tensors='tf' ) # forward pass A__ = model(**lowercase_ ) # verify the logits A__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape,lowercase_ ) A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
7
1
from math import sqrt


def is_prime(number: int) -> bool:
    """Determines whether the given number is prime or not."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number (2 is the 1st)."""
    count = 0
    number = 1
    # handle 2 separately, then step through odd candidates only
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
7
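# Quick checks for the helper and the solver above: the first prime is 2 and
# the 6th prime is 13.
def check_nth_prime() -> None:
    assert is_prime(13) and not is_prime(15)
    assert solution(1) == 2
    assert solution(6) == 13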
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class A : """simple docstring""" def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = scope A__ = vocab_size - 1 def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size ) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels ) A__ = self.get_config() return config, input_ids, input_mask, token_labels def snake_case__ ( self : List[Any] )-> Tuple: '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,) def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = True return config, input_ids, input_mask, token_labels def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any: '''simple docstring''' A__ = GPTNeoXModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) A__ = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple: '''simple 
docstring''' A__ = True A__ = GPTNeoXModel(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]: '''simple docstring''' A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForQuestionAnswering(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_ ) self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) ) def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = ids_tensor([self.batch_size],self.type_sequence_label_size ) A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) ) def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]: '''simple docstring''' A__ = self.num_labels A__ = GPTNeoXForTokenClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ ) self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = True A__ = GPTNeoXForCausalLM(config=lowercase_ ) model.to(lowercase_ ) model.eval() # first forward pass A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3),config.vocab_size ) A__ = ids_tensor((self.batch_size, 3),vocab_size=2 ) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens],dim=-1 ) A__ = torch.cat([input_mask, next_mask],dim=-1 ) A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ ) A__ = output_from_no_past['hidden_states'][0] A__ = model( lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0] # select random slice A__ = ids_tensor((1,),output_from_past.shape[-1] ).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) ) def snake_case__ ( self : str )-> Union[str, Any]: '''simple docstring''' A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = 
config_and_inputs A__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowerCamelCase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = GPTNeoXModelTester(self ) A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 ) def snake_case__ ( self : Optional[Any] )-> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : List[str] )-> Any: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowercase_ ) def snake_case__ ( self : Tuple )-> List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) def snake_case__ ( self : Any )-> List[str]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @unittest.skip(reason='Feed forward chunking is not implemented' ) def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]: '''simple docstring''' A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ids_tensor([1, 1_0],config.vocab_size ) A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = 
GPTNeoXModel(lowercase_ ) original_model.to(lowercase_ ) original_model.eval() A__ = original_model(lowercase_ ).last_hidden_state A__ = original_model(lowercase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights A__ = {'type': scaling_type, 'factor': 10.0} A__ = GPTNeoXModel(lowercase_ ) scaled_model.to(lowercase_ ) scaled_model.eval() A__ = scaled_model(lowercase_ ).last_hidden_state A__ = scaled_model(lowercase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) else: self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) @require_torch class A ( unittest.TestCase ): """simple docstring""" @slow def snake_case__ ( self : Tuple )-> Union[str, Any]: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' ) for checkpointing in [True, False]: A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowercase_ ) A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 ) A__ = tokenizer.batch_decode(lowercase_ )[0] self.assertEqual(lowercase_,lowercase_ )
7
1
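# A minimal, de-obfuscated sketch of the past-key-values consistency check
# the test above performs: logits from a cached, incremental forward pass
# should match a full forward pass over the same ids. Assumes network access
# to the public EleutherAI/pythia-70m checkpoint; variable names here are
# illustrative, not from the original test.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-70m").eval()

ids = tok("My favorite food is", return_tensors="pt").input_ids
with torch.no_grad():
    out = model(ids[:, :-1], use_cache=True)  # prefix pass, cache the KV states
    cached = model(ids[:, -1:], past_key_values=out.past_key_values).logits[:, -1]
    full = model(ids).logits[:, -1]            # one full pass for comparison
print(torch.allclose(cached, full, atol=1e-4))  # expected: True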
import os
import shutil
from pathlib import Path
from typing import Optional, Union

import numpy as np
from huggingface_hub import hf_hub_download

from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging


if is_onnx_available():
    import onnxruntime as ort


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.inta,
    "tensor(uint8)": np.uinta,
    "tensor(int16)": np.intaa,
    "tensor(uint16)": np.uintaa,
    "tensor(int32)": np.intaa,
    "tensor(uint32)": np.uintaa,
    "tensor(int64)": np.intaa,
    "tensor(uint64)": np.uintaa,
    "tensor(float16)": np.floataa,
    "tensor(float)": np.floataa,
    "tensor(double)": np.floataa,
}


class A :
    """simple docstring"""

    def __init__( self : Any,lowercase_ : int=None,**lowercase_ : Optional[Any] )-> Tuple:
        '''simple docstring'''
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        A__ = model
        A__ = kwargs.get('model_save_dir',lowercase_ )
        A__ = kwargs.get('latest_model_name',lowercase_ )

    def __call__( self : str,**lowercase_ : Dict )-> Any:
        '''simple docstring'''
        A__ = {k: np.array(lowercase_ ) for k, v in kwargs.items()}
        return self.model.run(lowercase_,lowercase_ )

    @staticmethod
    def snake_case__ ( lowercase_ : Union[str, Path],lowercase_ : Optional[Any]=None,lowercase_ : Union[str, Any]=None )-> str:
        '''simple docstring'''
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            A__ = 'CPUExecutionProvider'
        return ort.InferenceSession(lowercase_,providers=[provider],sess_options=lowercase_ )

    def snake_case__ ( self : Dict,lowercase_ : Union[str, Path],lowercase_ : Optional[str] = None,**lowercase_ : List[str] )-> Any:
        '''simple docstring'''
        A__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        A__ = self.model_save_dir.joinpath(self.latest_model_name )
        A__ = Path(lowercase_ ).joinpath(lowercase_ )
        try:
            shutil.copyfile(lowercase_,lowercase_ )
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        A__ = self.model_save_dir.joinpath(lowercase_ )
        if src_path.exists():
            A__ = Path(lowercase_ ).joinpath(lowercase_ )
            try:
                shutil.copyfile(lowercase_,lowercase_ )
            except shutil.SameFileError:
                pass

    def snake_case__ ( self : Tuple,lowercase_ : Union[str, os.PathLike],**lowercase_ : Any,)-> List[str]:
        '''simple docstring'''
        if os.path.isfile(lowercase_ ):
            logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
            return

        os.makedirs(lowercase_,exist_ok=lowercase_ )

        # saving model weights/files
        self._save_pretrained(lowercase_,**lowercase_ )

    @classmethod
    def snake_case__ ( cls : Union[str, Any],lowercase_ : Union[str, Path],lowercase_ : Optional[Union[bool, str, None]] = None,lowercase_ : Optional[Union[str, None]] = None,lowercase_ : bool = False,lowercase_ : Optional[str] = None,lowercase_ : Optional[str] = None,lowercase_ : Optional[str] = None,lowercase_ : Optional["ort.SessionOptions"] = None,**lowercase_ : int,)-> List[str]:
        '''simple docstring'''
        A__ = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(lowercase_ ):
            A__ = OnnxRuntimeModel.load_model(
                os.path.join(lowercase_,lowercase_ ),provider=lowercase_,sess_options=lowercase_ )
            A__ = Path(lowercase_ )
        # load model from hub
        else:
            # download model
            A__ = hf_hub_download(
                repo_id=lowercase_,filename=lowercase_,use_auth_token=lowercase_,revision=lowercase_,cache_dir=lowercase_,force_download=lowercase_,)
            A__ = Path(lowercase_ ).parent
            A__ = Path(lowercase_ ).name
        A__ = OnnxRuntimeModel.load_model(lowercase_,provider=lowercase_,sess_options=lowercase_ )
        return cls(model=lowercase_,**lowercase_ )

    @classmethod
    def snake_case__ ( cls : str,lowercase_ : Union[str, Path],lowercase_ : bool = True,lowercase_ : Optional[str] = None,lowercase_ : Optional[str] = None,**lowercase_ : Tuple,)-> Tuple:
        '''simple docstring'''
        A__ = None
        if len(str(lowercase_ ).split('@' ) ) == 2:
            A__ , A__ = model_id.split('@' )
        return cls._from_pretrained(
            model_id=lowercase_,revision=lowercase_,cache_dir=lowercase_,force_download=lowercase_,use_auth_token=lowercase_,**lowercase_,)
7
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop(
            'use_memorry_efficient_attention',lowercase_ )
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}' )
        A__ = self.rope_scaling.get('type',lowercase_ )
        A__ = self.rope_scaling.get('factor',lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
7
1
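# A stand-alone, de-obfuscated sketch of the `rope_scaling` validation in the
# config above; `validate_rope_scaling` is an illustrative name (the original
# is the `_rope_scaling_validation` method on the config class).
def validate_rope_scaling(rope_scaling):
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(
            f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {rope_scaling}"
        )
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")


validate_rope_scaling({"type": "linear", "factor": 2.0})  # valid: passes silently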
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger()


@dataclass
class A :
    """simple docstring"""

    lowerCamelCase = 42
    lowerCamelCase = field(default_factory=_UpperCAmelCase )
    lowerCamelCase = field(default_factory=_UpperCAmelCase )

    def snake_case__ ( self : Union[str, Any],lowercase_ : Any,lowercase_ : Tensor,lowercase_ : Tensor )-> List[Any]:
        '''simple docstring'''
        A__ = len(list(m.modules() ) ) == 1 or isinstance(lowercase_,nn.Convad ) or isinstance(lowercase_,nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowercase_ )

    def __call__( self : Dict,lowercase_ : Tensor )-> Tuple:
        '''simple docstring'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowercase_ )
        [x.remove() for x in self.handles]
        return self

    @property
    def snake_case__ ( self : str )-> Optional[Any]:
        '''simple docstring'''
        return list(filter(lambda lowercase_ : len(list(x.state_dict().keys() ) ) > 0,self.traced ) )


@dataclass
class A :
    """simple docstring"""

    lowerCamelCase = 42
    lowerCamelCase = 42
    lowerCamelCase = 0
    lowerCamelCase = field(default_factory=_UpperCAmelCase )
    lowerCamelCase = field(default_factory=_UpperCAmelCase )

    def __call__( self : List[str],lowercase_ : Tensor )-> Union[str, Any]:
        '''simple docstring'''
        A__ = Tracker(self.dest )(lowercase_ ).parametrized
        A__ = Tracker(self.src )(lowercase_ ).parametrized
        A__ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.src_skip,lowercase_ ) )
        A__ = list(filter(lambda lowercase_ : type(lowercase_ ) not in self.dest_skip,lowercase_ ) )
        if len(lowercase_ ) != len(lowercase_ ):
            raise Exception(
                F'Numbers of operations are different. Source module has {len(lowercase_ )} operations while'
                F' destination module has {len(lowercase_ )}.' )
        for dest_m, src_m in zip(lowercase_,lowercase_ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'Transfered from={src_m} to={dest_m}' )


def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : ResNetConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ) -> int:
    '''simple docstring'''
    print(f'Converting {name}...' )
    with torch.no_grad():
        A__ = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval()
        A__ = ResNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
        A__ = ModuleTransfer(src=SCREAMING_SNAKE_CASE__ , dest=SCREAMING_SNAKE_CASE__ )
        A__ = torch.randn((1, 3, 224, 224) )
        module_transfer(SCREAMING_SNAKE_CASE__ )

    assert torch.allclose(from_model(SCREAMING_SNAKE_CASE__ ) , our_model(SCREAMING_SNAKE_CASE__ ).logits ), "The model logits don't match the original one."

    A__ = f'resnet{"-".join(name.split("resnet" ) )}'
    print(SCREAMING_SNAKE_CASE__ )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )

        # we can use the convnext one
        A__ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=SCREAMING_SNAKE_CASE__ , )

        print(f'Pushed {checkpoint_name}' )


def _snake_case( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ) -> Dict:
    '''simple docstring'''
    A__ = 'imagenet-1k-id2label.json'
    A__ = 1000
    A__ = (1, num_labels)
    A__ = 'huggingface/label-files'
    A__ = num_labels
    A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
    A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
    A__ = idalabel
    A__ = {v: k for k, v in idalabel.items()}
    A__ = partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
    A__ = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
    }
    if model_name:
        convert_weight_and_push(SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    return config, expected_shape


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    lowercase_ = parser.parse_args()
    lowercase_ = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
7
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
    '''simple docstring'''
    return EnvironmentCommand()


class A ( _UpperCAmelCase ):
    """simple docstring"""

    @staticmethod
    def snake_case__ ( lowercase_ : ArgumentParser )-> Dict:
        '''simple docstring'''
        A__ = parser.add_parser('env' )
        download_parser.set_defaults(func=lowercase_ )

    def snake_case__ ( self : List[Any] )-> List[str]:
        '''simple docstring'''
        A__ = huggingface_hub.__version__

        A__ = 'not installed'
        A__ = 'NA'
        if is_torch_available():
            import torch

            A__ = torch.__version__
            A__ = torch.cuda.is_available()

        A__ = 'not installed'
        if is_transformers_available():
            import transformers

            A__ = transformers.__version__

        A__ = 'not installed'
        if is_accelerate_available():
            import accelerate

            A__ = accelerate.__version__

        A__ = 'not installed'
        if is_xformers_available():
            import xformers

            A__ = xformers.__version__

        A__ = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(lowercase_ ) )

        return info

    @staticmethod
    def snake_case__ ( lowercase_ : int )-> Optional[Any]:
        '''simple docstring'''
        return "\n".join([F'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
7
1
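# A self-contained sketch of the forward-hook tracing idea behind the
# Tracker/ModuleTransfer pair in the conversion script above: register a hook
# on every module, run one forward pass, and record each leaf module that
# fired. Function and variable names are illustrative, not from the original.
import torch
import torch.nn as nn


def trace_leaf_modules(module: nn.Module, x: torch.Tensor):
    traced, handles = [], []

    def hook(m, inputs, output):
        if len(list(m.children())) == 0:  # leaf: has no submodules of its own
            traced.append(m)

    for m in module.modules():
        handles.append(m.register_forward_hook(hook))
    module(x)            # one forward pass triggers every hook in order
    for h in handles:
        h.remove()       # always detach the hooks afterwards
    return traced


print(trace_leaf_modules(nn.Sequential(nn.Linear(4, 8), nn.ReLU()), torch.randn(1, 4)))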
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = tempfile.mkdtemp() A__ = 8 # DPR tok A__ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A__ = os.path.join(self.tmpdirname,'dpr_tokenizer' ) os.makedirs(lowercase_,exist_ok=lowercase_ ) A__ = os.path.join(lowercase_,DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok A__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A__ = dict(zip(lowercase_,range(len(lowercase_ ) ) ) ) A__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A__ = {'unk_token': '<unk>'} A__ = os.path.join(self.tmpdirname,'bart_tokenizer' ) os.makedirs(lowercase_,exist_ok=lowercase_ ) A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['vocab_file'] ) A__ = os.path.join(lowercase_,BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) with open(self.merges_file,'w',encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase_ ) ) def snake_case__ ( self : List[str] )-> DPRQuestionEncoderTokenizer: '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname,'dpr_tokenizer' ) ) def snake_case__ ( self : List[Any] )-> BartTokenizer: '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname,'bart_tokenizer' ) ) def snake_case__ ( self : List[str] )-> List[str]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def snake_case__ ( self : str )-> Any: '''simple docstring''' A__ = os.path.join(self.tmpdirname,'rag_tokenizer' ) A__ = RagConfig(question_encoder=DPRConfig().to_dict(),generator=BartConfig().to_dict() ) A__ = RagTokenizer(question_encoder=self.get_dpr_tokenizer(),generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(lowercase_ ) rag_tokenizer.save_pretrained(lowercase_ ) A__ = RagTokenizer.from_pretrained(lowercase_,config=lowercase_ ) self.assertIsInstance(new_rag_tokenizer.question_encoder,lowercase_ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(),rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator,lowercase_ ) 
self.assertEqual(new_rag_tokenizer.generator.get_vocab(),rag_tokenizer.generator.get_vocab() ) @slow def snake_case__ ( self : Optional[int] )-> Union[str, Any]: '''simple docstring''' A__ = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) A__ = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A__ = tokenizer(lowercase_ ) self.assertIsNotNone(lowercase_ ) @slow def snake_case__ ( self : Optional[Any] )-> Dict: '''simple docstring''' A__ = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) A__ = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] A__ = tokenizer(lowercase_ ) self.assertIsNotNone(lowercase_ )
7
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = ReformerTokenizer lowerCamelCase = ReformerTokenizerFast lowerCamelCase = True lowerCamelCase = False lowerCamelCase = True def snake_case__ ( self : Any )-> str: '''simple docstring''' super().setUp() A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : Optional[int] )-> Optional[int]: '''simple docstring''' A__ = '<s>' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<unk>' ) self.assertEqual(vocab_keys[1],'<s>' ) self.assertEqual(vocab_keys[-1],'j' ) self.assertEqual(len(lowercase_ ),1_0_0_0 ) def snake_case__ ( self : Dict )-> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,1_0_0_0 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int,lowercase_ : Optional[int]=1_5 )-> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A__ = self.rust_tokenizer_class.from_pretrained(lowercase_,**lowercase_ ) # Simple input A__ = 'This is a simple input' A__ = ['This is a simple input 1', 'This is a simple input 2'] A__ = ('This is a simple input', 'This is a pair') A__ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Simple input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises(lowercase_,tokenizer_r.encode_plus,lowercase_,max_length=lowercase_,padding='max_length' ) # Pair input self.assertRaises( lowercase_,tokenizer_r.batch_encode_plus,lowercase_,max_length=lowercase_,padding='max_length',) def snake_case__ ( self : 
List[Any] )-> Tuple: '''simple docstring''' pass def snake_case__ ( self : Dict )-> str: '''simple docstring''' A__ = ReformerTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ),[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2],) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ],) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4],) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ],) @cached_property def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def snake_case__ ( self : str )-> Tuple: '''simple docstring''' A__ = 'Hello World!' A__ = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @slow def snake_case__ ( self : Optional[int] )-> str: '''simple docstring''' A__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) A__ = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(lowercase_,self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def snake_case__ ( self : int )-> Any: '''simple docstring''' import torch from transformers import ReformerConfig, ReformerModel # Build sequence A__ = list(self.big_tokenizer.get_vocab().keys() )[:1_0] A__ = ' '.join(lowercase_ ) A__ = self.big_tokenizer.encode_plus(lowercase_,return_tensors='pt' ) A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='pt' ) A__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) A__ = encoded_sequence['input_ids'].shape A__ = ReformerModel(lowercase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def snake_case__ ( self : int )-> Tuple: '''simple docstring''' A__ = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 A__ = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='google/reformer-crime-and-punishment',revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a',padding=lowercase_,sequences=lowercase_,)
7
1
import comet # From: unbabel-comet import torch import datasets lowercase_ = datasets.logging.get_logger(__name__) lowercase_ = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n" lowercase_ = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n" lowercase_ = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. 
Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): """simple docstring""" def snake_case__ ( self : Union[str, Any] )-> List[str]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION,citation=_CITATION,homepage='https://unbabel.github.io/COMET/html/index.html',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features( { 'sources': datasets.Value('string',id='sequence' ), 'predictions': datasets.Value('string',id='sequence' ), 'references': datasets.Value('string',id='sequence' ), } ),codebase_urls=['https://github.com/Unbabel/COMET'],reference_urls=[ 'https://github.com/Unbabel/COMET', 'https://www.aclweb.org/anthology/2020.emnlp-main.213/', 'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6', ],) def snake_case__ ( self : Dict,lowercase_ : int )-> List[Any]: '''simple docstring''' if self.config_name == "default": A__ = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) ) else: A__ = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def snake_case__ ( self : Tuple,lowercase_ : Any,lowercase_ : int,lowercase_ : Optional[int],lowercase_ : Dict=None,lowercase_ : str=False )-> str: '''simple docstring''' if gpus is None: A__ = 1 if torch.cuda.is_available() else 0 A__ = {'src': sources, 'mt': predictions, 'ref': references} A__ = [dict(zip(lowercase_,lowercase_ ) ) for t in zip(*data.values() )] A__ , A__ = self.scorer.predict(lowercase_,gpus=lowercase_,progress_bar=lowercase_ ) return {"mean_score": mean_score, "scores": scores}
7
def _snake_case( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , ) -> float:
    '''simple docstring'''
    A__ = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError('All input parameters must be positive' )

    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError('Relative densities cannot be greater than one' )
    else:
        A__ = 1 - (matter_density + radiation_density + dark_energy)

        A__ = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        A__ = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    lowercase_ = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
7
1
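# A worked, unobfuscated version of the Friedmann/Hubble computation above
# (parameter names are mine): H(z) = H0 * sqrt(Or*(1+z)^4 + Om*(1+z)^3
# + Ok*(1+z)^2 + OL), with curvature Ok = 1 - (Om + Or + OL).
def hubble_parameter(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_squared = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_squared ** 0.5


# At z = 0 the density terms sum to 1 in a flat universe, so H(0) == H0:
print(hubble_parameter(68.3, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0))  # 68.3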
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> List[Any]: '''simple docstring''' A__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[int]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: A__ = '' else: A__ = 'vit.' 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) A__ = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[ : config.hidden_size, : ] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: '''simple docstring''' A__ = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( ) -> Any: '''simple docstring''' A__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ) -> Dict: '''simple docstring''' A__ = ViTConfig() A__ = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": A__ = True A__ = int(vit_name[-12:-10] ) A__ = int(vit_name[-9:-6] ) else: A__ = 1000 A__ = 'huggingface/label-files' A__ = 'imagenet-1k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = int(vit_name[-6:-4] ) A__ = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('tiny' ): A__ = 192 A__ = 768 A__ = 12 A__ = 3 elif vit_name[9:].startswith('small' ): A__ = 384 A__ = 1536 A__ = 12 A__ = 6 else: pass else: if vit_name[4:].startswith('small' ): A__ = 768 A__ = 2304 A__ = 8 A__ = 8 elif vit_name[4:].startswith('base' ): pass elif vit_name[4:].startswith('large' ): A__ = 1024 A__ = 4096 A__ = 24 A__ = 16 elif vit_name[4:].startswith('huge' ): A__ = 1280 A__ = 5120 A__ = 32 A__ = 16 # load original model from timm A__ = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ = timm_model.state_dict() if base_model: remove_classification_head_(SCREAMING_SNAKE_CASE__ ) A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # load HuggingFace model if vit_name[-5:] == "in21k": A__ = ViTModel(SCREAMING_SNAKE_CASE__ ).eval() else: A__ = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: A__ = DeiTImageProcessor(size=config.image_size ) else: A__ = ViTImageProcessor(size=config.image_size ) A__ = image_processor(images=prepare_img() , return_tensors='pt' ) A__ = 
encoding['pixel_values'] A__ = model(SCREAMING_SNAKE_CASE__ ) if base_model: A__ = timm_model.forward_features(SCREAMING_SNAKE_CASE__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.pooler_output , atol=1E-3 ) else: A__ = timm_model(SCREAMING_SNAKE_CASE__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) lowercase_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
7
from typing import Union

import fire
import torch
from tqdm import tqdm


def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = "cpu" , SCREAMING_SNAKE_CASE__ : Union[str, None] = None ) -> None:
    '''simple docstring'''
    A__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        A__ = v.half()
    if save_path is None:  # overwrite src_path
        A__ = src_path
    torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    fire.Fire(convert)
7
1
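# A tiny end-to-end usage sketch of the fp16 state-dict conversion above:
# save a float32 model, halve every tensor, and write it back. File names
# here are illustrative.
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
torch.save(model.state_dict(), "model_fp32.bin")

state_dict = torch.load("model_fp32.bin", map_location="cpu")
state_dict = {k: v.half() for k, v in state_dict.items()}  # fp32 -> fp16
torch.save(state_dict, "model_fp16.bin")

print({k: v.dtype for k, v in state_dict.items()})  # all torch.float16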
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowercase_ = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
7
import os

# Precomputes a list of the 100 first triangular numbers
lowercase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def _snake_case( ) -> int:
    '''simple docstring'''
    A__ = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE__ ) )
    A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'words.txt' )
    A__ = ''
    with open(SCREAMING_SNAKE_CASE__ ) as f:
        A__ = f.readline()
    A__ = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    A__ = [
        word
        for word in [sum(ord(SCREAMING_SNAKE_CASE__ ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    print(solution())
7
1
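# A worked example of the triangular-word score computed above: a word's
# score is the sum of its letter positions (A=1 ... Z=26), and the word is
# "triangular" when that score equals a triangular number t(n) = n*(n+1)/2.
TRIANGULAR_NUMBERS = [n * (n + 1) // 2 for n in range(1, 101)]


def is_triangular_word(word: str) -> bool:
    return sum(ord(ch) - 64 for ch in word.upper()) in TRIANGULAR_NUMBERS


print(is_triangular_word("SKY"))  # True: 19 + 11 + 25 = 55 = t(10)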
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowercase_ = HfApi() lowercase_ = {} # fmt: off lowercase_ = torch.tensor([ -0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467, 1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189, -1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839, 0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557 ]) lowercase_ = torch.tensor([ -2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436, 1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208, -2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948, 2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365 ]) lowercase_ = torch.tensor([ -0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869, -0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304, -0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925, 0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943 ]) lowercase_ = torch.tensor([ 0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172, -0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309, 0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805, -0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505 ]) lowercase_ = torch.tensor([ 0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133, -0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395, 0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559, -0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386 ]) lowercase_ = torch.tensor([ 0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078, -0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330, 0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683, -0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431 ]) lowercase_ = torch.tensor([ 0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042, -0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398, 0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574, -0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390 ]) lowercase_ = torch.tensor([ 0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042, -0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290, 0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746, -0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473 ]) lowercase_ = torch.tensor([ -1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330, 1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243, -2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810, 1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251]) lowercase_ = torch.tensor([ -1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324, 0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181, -2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259, 1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266 ]) lowercase_ = torch.tensor([ -1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212, 0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027, -2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131, 
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355 ]) lowercase_ = torch.tensor([ -2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959, 1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351, -3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341, 3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066 ]) lowercase_ = torch.tensor([ -2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740, 1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398, -2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395, 2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243 ]) lowercase_ = torch.tensor([ -2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336, 1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908, -3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560, 3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343 ]) lowercase_ = torch.tensor([ -1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344, 1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391, -2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439, 1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219 ]) # fmt: on lowercase_ = api.list_models(filter="diffusers") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowercase_ = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1] print(f"""Started running {mod.modelId}!!!""") if mod.modelId.startswith("CompVis"): lowercase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder="unet") else: lowercase_ = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowercase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowercase_ = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowercase_ = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3 ) print(f"""{mod.modelId} has passed successfully!!!""")
7
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin lowercase_ = False @skip_mps class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionAttendAndExcitePipeline lowerCamelCase = False lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def snake_case__ ( cls : Any )-> Optional[Any]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : Optional[Any] )-> Dict: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[str] )-> int: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=1,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),cross_attention_dim=3_2,attention_head_dim=(2, 4),use_linear_projection=lowercase_,) A__ = DDIMScheduler( beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : List[Any]=0 )-> int: '''simple docstring''' if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = A__ = { 'prompt': 'a cat and a frog', 'token_indices': [2, 5], 'generator': generator, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', 'max_iter_to_alter': 2, 'thresholds': {0: 0.7}, } return inputs def snake_case__ ( self : List[str] )-> Optional[Any]: '''simple docstring''' A__ = 'cpu' A__ = self.get_dummy_components() A__ = self.pipeline_class(**lowercase_ ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape,(1, 6_4, 6_4, 3) ) A__ = np.array( 
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) A__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase_,1E-3 ) def snake_case__ ( self : str )-> Optional[Any]: '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def snake_case__ ( self : str )-> int: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2,expected_max_diff=7E-4 ) def snake_case__ ( self : Optional[Any] )-> int: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class A ( unittest.TestCase ): """simple docstring""" @classmethod def snake_case__ ( cls : Any )-> Optional[int]: '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(lowercase_ ) @classmethod def snake_case__ ( cls : int )-> List[Any]: '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(lowercase_ ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' A__ = torch.manual_seed(5_1 ) A__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4',safety_checker=lowercase_,torch_dtype=torch.floataa ) pipe.to('cuda' ) A__ = 'a painting of an elephant with glasses' A__ = [5, 7] A__ = pipe( prompt=lowercase_,token_indices=lowercase_,guidance_scale=7.5,generator=lowercase_,num_inference_steps=5,max_iter_to_alter=5,output_type='numpy',).images[0] A__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' ) assert np.abs((expected_image - image).max() ) < 5E-1
7
1
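# --- Example (not part of the dataset row above) -----------------------------
# A minimal sketch of how the `token_indices` consumed by
# StableDiffusionAttendAndExcitePipeline can be located: tokenize the prompt
# and inspect the positions of the words to excite. The tiny tokenizer below is
# the same one the dummy components load; position 0 is the BOS token, which is
# why nouns land at indices such as the [2, 5] used in the dummy inputs.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
prompt = "a cat and a frog"
token_ids = tokenizer(prompt).input_ids
for index, token in enumerate(tokenizer.convert_ids_to_tokens(token_ids)):
    print(index, token)  # the positions of "cat" and "frog" are the token_indices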
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = os.path.abspath(SCREAMING_SNAKE_CASE__ ) logger.info(f'Converting TensorFlow checkpoint from {tf_path}' ) # Load weights from TF model A__ = tf.train.list_variables(SCREAMING_SNAKE_CASE__ ) A__ = [] A__ = [] A__ = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") A__ = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(f'Skipping non-model layer {full_name}' ) continue if "optimizer" in full_name: logger.info(f'Skipping optimization layer {full_name}' ) continue if name[0] == "model": # ignore initial 'model' A__ = name[1:] # figure out how many levels deep the name is A__ = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(SCREAMING_SNAKE_CASE__ ) # read data A__ = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) names.append('/'.join(SCREAMING_SNAKE_CASE__ ) ) arrays.append(SCREAMING_SNAKE_CASE__ ) logger.info(f'Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers' ) # Sanity check if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1: raise ValueError(f'Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})' ) A__ = list(set(SCREAMING_SNAKE_CASE__ ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' 
) for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): A__ = full_name.split('/' ) A__ = model A__ = [] for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): A__ = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'embeddings' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'encoder' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'layer' ) A__ = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'pooler' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'token_type_embeddings' ) else: raise ValueError(f'Unknown embedding layer with name {full_name}' ) trace.append('weight' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'attention' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'attention' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'output' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'attention' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'output' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'output' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'output' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'intermediate' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('output' ) A__ = 
getattr(SCREAMING_SNAKE_CASE__ , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) A__ = getattr(SCREAMING_SNAKE_CASE__ , 'weight' ) else: logger.warning(f'Ignored {m_name}' ) # for certain layers reshape is necessary A__ = '.'.join(SCREAMING_SNAKE_CASE__ ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , SCREAMING_SNAKE_CASE__ ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , SCREAMING_SNAKE_CASE__ ): A__ = array.reshape(pointer.data.shape ) if "kernel" in full_name: A__ = array.transpose() if pointer.shape == array.shape: A__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ ) else: raise ValueError( f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:' f' {array.shape}' ) logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' ) return model def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ) -> Any: '''simple docstring''' logger.info(f'Loading model based on config from {config_path}...' ) A__ = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ ) A__ = BertModel(SCREAMING_SNAKE_CASE__ ) # Load weights from checkpoint logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' ) load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Save pytorch-model logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model (must include filename).", ) lowercase_ = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
7
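# --- Example ------------------------------------------------------------------
# A hedged sketch of consuming the conversion script's output: rebuild the model
# from the same config and load the dumped weights. The file names are
# placeholders for whatever was passed as --bert_config_file and
# --pytorch_dump_path.
import torch

from transformers import BertConfig, BertModel

config = BertConfig.from_json_file("bert_config.json")
model = BertModel(config)
model.load_state_dict(torch.load("pytorch_model.bin"))
model.eval()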
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL


is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
7
1
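# --- Example ------------------------------------------------------------------
# A minimal sketch of running the exported decoder with onnxruntime. The path
# assumes the layout written above (<output_path>/vae_decoder/model.onnx) and a
# latent_channels of 4; feeds are built from the session's declared inputs in
# case the constant `return_dict` flag was folded away during export.
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("output/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
feeds = {session.get_inputs()[0].name: latent}
outputs = session.run(None, feeds)
print(outputs[0].shape)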
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowercase_ = { "configuration_mask2former": [ "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "Mask2FormerConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["Mask2FormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "Mask2FormerForUniversalSegmentation", "Mask2FormerModel", "Mask2FormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
7
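# --- Example ------------------------------------------------------------------
# A stripped-down sketch of the lazy-import pattern `_LazyModule` implements:
# module attributes resolve on first access via a module-level __getattr__
# (PEP 562), so importing the package stays cheap until a symbol is actually
# used. The mapping below is illustrative, not the real transformers machinery.
import importlib

_import_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")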
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = (DPMSolverSinglestepScheduler,) lowerCamelCase = (('num_inference_steps', 25),) def snake_case__ ( self : Tuple,**lowercase_ : Dict )-> Optional[int]: '''simple docstring''' A__ = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**lowercase_ ) return config def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ , A__ = sample, sample for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ): A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : List[str] )-> List[Any]: '''simple docstring''' pass def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' A__ = dict(self.forward_default_kwargs ) A__ = kwargs.pop('num_inference_steps',lowercase_ ) A__ = self.dummy_sample A__ = 0.1 * sample A__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A__ = self.get_scheduler_config() A__ = scheduler_class(**lowercase_ ) scheduler.set_timesteps(lowercase_ ) # copy over dummy past residuals (must be after setting timesteps) A__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowercase_ ) A__ = scheduler_class.from_pretrained(lowercase_ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowercase_ ) # copy over dummy past residual (must be after setting timesteps) A__ = dummy_past_residuals[: new_scheduler.config.solver_order] A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int]=None,**lowercase_ : int )-> int: '''simple docstring''' if scheduler is None: A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) 
A__ = scheduler_class(**lowercase_ ) A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(**lowercase_ ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample return sample def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = 5_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter scheduler.set_timesteps(lowercase_ ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_574 ) < 1E-3 def snake_case__ ( self : Optional[Any] )-> List[Any]: '''simple docstring''' for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowercase_ ) def snake_case__ ( self : int )-> Optional[Any]: '''simple docstring''' A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 A__ = DEISMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) A__ = UniPCMultistepScheduler.from_config(scheduler.config ) A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A__ = self.full_loop(scheduler=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' self.check_over_configs(thresholding=lowercase_ ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,) def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowercase_ ) def snake_case__ ( self : Dict )-> List[Any]: '''simple docstring''' for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) A__ = self.full_loop( solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,) assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers" def snake_case__ ( self : Optional[int] )-> Tuple: '''simple docstring''' self.check_over_configs(lower_order_final=lowercase_ ) self.check_over_configs(lower_order_final=lowercase_ ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' self.check_over_configs(variance_type=lowercase_ ) self.check_over_configs(variance_type='learned_range' ) def snake_case__ ( self : str )-> Any: '''simple 
docstring''' for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=lowercase_,time_step=0 ) def snake_case__ ( self : Tuple )-> Tuple: '''simple docstring''' A__ = self.full_loop() A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_791 ) < 1E-3 def snake_case__ ( self : Any )-> Union[str, Any]: '''simple docstring''' A__ = self.full_loop(use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.2_248 ) < 1E-3 def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction' ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.1_453 ) < 1E-3 def snake_case__ ( self : Tuple )-> int: '''simple docstring''' A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ ) A__ = torch.mean(torch.abs(lowercase_ ) ) assert abs(result_mean.item() - 0.0_649 ) < 1E-3 def snake_case__ ( self : List[Any] )-> int: '''simple docstring''' A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 ) A__ = scheduler_class(**lowercase_ ) A__ = 1_0 A__ = self.dummy_model() A__ = self.dummy_sample_deter.half() scheduler.set_timesteps(lowercase_ ) for i, t in enumerate(scheduler.timesteps ): A__ = model(lowercase_,lowercase_ ) A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample assert sample.dtype == torch.floataa
7
1
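# --- Example ------------------------------------------------------------------
# The scheduler interchange the test above verifies, in user-facing form: each
# of these solvers can be rebuilt from another's config, so a pipeline's
# scheduler can be swapped in one line. The checkpoint name is illustrative.
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)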
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _snake_case( SCREAMING_SNAKE_CASE__ : int ) -> Optional[Any]: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _snake_case( ) -> List[str]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _snake_case( ) -> Dict: '''simple docstring''' A__ = 'mock-s3-bucket' A__ = f's3://{mock_bucket}' A__ = extract_path_from_uri(SCREAMING_SNAKE_CASE__ ) assert dataset_path.startswith('s3://' ) is False A__ = './local/path' A__ = extract_path_from_uri(SCREAMING_SNAKE_CASE__ ) assert dataset_path == new_dataset_path def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> int: '''simple docstring''' A__ = is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) assert is_remote is True A__ = fsspec.filesystem('file' ) A__ = is_remote_filesystem(SCREAMING_SNAKE_CASE__ ) assert is_remote is False @pytest.mark.parametrize('compression_fs_class' , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]: '''simple docstring''' A__ = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file} A__ = input_paths[compression_fs_class.protocol] if input_path is None: A__ = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(SCREAMING_SNAKE_CASE__ ) A__ = fsspec.filesystem(compression_fs_class.protocol , fo=SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ = os.path.basename(SCREAMING_SNAKE_CASE__ ) A__ = expected_filename[: expected_filename.rindex('.' 
)] assert fs.glob('*' ) == [expected_filename] with fs.open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f, open(SCREAMING_SNAKE_CASE__ , encoding='utf-8' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('protocol' , ['zip', 'gzip'] ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A__ = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path} A__ = compressed_file_paths[protocol] A__ = 'dataset.jsonl' A__ = f'{protocol}://{member_file_path}::{compressed_file_path}' A__ , *A__ = fsspec.get_fs_token_paths(SCREAMING_SNAKE_CASE__ ) assert fs.isfile(SCREAMING_SNAKE_CASE__ ) assert not fs.isfile('non_existing_' + member_file_path ) @pytest.mark.integration def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ) -> Optional[int]: '''simple docstring''' A__ = hf_api.dataset_info(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) A__ = HfFileSystem(repo_info=SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"] assert hffs.isdir('data' ) assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' ) with open(SCREAMING_SNAKE_CASE__ ) as f: assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read() def _snake_case( ) -> str: '''simple docstring''' A__ = 'bz2' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , clobber=SCREAMING_SNAKE_CASE__ ) with pytest.warns(SCREAMING_SNAKE_CASE__ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(SCREAMING_SNAKE_CASE__ ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
7
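# --- Example ------------------------------------------------------------------
# What the parametrized compression test exercises, in miniature: fsspec can
# mount a single compressed file as a one-file filesystem, or open it
# transparently via the `compression` argument. "data.txt.gz" is a placeholder.
import fsspec

fs = fsspec.filesystem("gzip", fo="data.txt.gz")
print(fs.glob("*"))  # the member file name without the .gz suffix

with fsspec.open("data.txt.gz", "rt", compression="gzip") as f:
    print(f.read())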
class A : """simple docstring""" def __init__( self : Any,lowercase_ : Tuple,lowercase_ : Any,lowercase_ : List[str] )-> List[Any]: '''simple docstring''' A__ = name A__ = value A__ = weight def __repr__( self : int )-> Tuple: '''simple docstring''' return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})' def snake_case__ ( self : Any )-> str: '''simple docstring''' return self.value def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' return self.name def snake_case__ ( self : Any )-> Dict: '''simple docstring''' return self.weight def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' return self.value / self.weight def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: '''simple docstring''' A__ = [] for i in range(len(SCREAMING_SNAKE_CASE__ ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ) -> Any: '''simple docstring''' A__ = sorted(SCREAMING_SNAKE_CASE__ , key=SCREAMING_SNAKE_CASE__ , reverse=SCREAMING_SNAKE_CASE__ ) A__ = [] A__ , A__ = 0.0, 0.0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _snake_case( ) -> Any: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
7
1
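# --- Example ------------------------------------------------------------------
# A worked call of the greedy routine above, using the Things/build_menu/greedy
# helpers it defines: rank by value and take items while the weight budget of
# 100 allows. The menu values are illustrative.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 100, Things.get_value)
print(chosen, total_value)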
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "google/realm-cc-news-pretrained-embedder": ( "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-encoder": ( "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json" ), "google/realm-cc-news-pretrained-scorer": ( "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json" ), "google/realm-cc-news-pretrained-openqa": ( "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json" ), "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json", "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json", "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json", "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json", # See all REALM models at https://huggingface.co/models?filter=realm } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'realm' def __init__( self : Dict,lowercase_ : Any=3_0_5_2_2,lowercase_ : Tuple=7_6_8,lowercase_ : List[str]=1_2_8,lowercase_ : List[Any]=1_2,lowercase_ : str=1_2,lowercase_ : Union[str, Any]=8,lowercase_ : int=3_0_7_2,lowercase_ : List[str]="gelu_new",lowercase_ : str=0.1,lowercase_ : Optional[Any]=0.1,lowercase_ : Dict=5_1_2,lowercase_ : Dict=2,lowercase_ : Optional[Any]=0.02,lowercase_ : List[str]=1E-12,lowercase_ : Any=2_5_6,lowercase_ : Optional[int]=1_0,lowercase_ : List[Any]=1E-3,lowercase_ : int=5,lowercase_ : int=3_2_0,lowercase_ : int=1_3_3_5_3_7_1_8,lowercase_ : int=5_0_0_0,lowercase_ : Tuple=1,lowercase_ : Union[str, Any]=0,lowercase_ : Dict=2,**lowercase_ : Dict,)-> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,**lowercase_ ) # Common config A__ = vocab_size A__ = max_position_embeddings A__ = hidden_size A__ = retriever_proj_size A__ = num_hidden_layers A__ = num_attention_heads A__ = num_candidates A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = initializer_range A__ = type_vocab_size A__ = layer_norm_eps # Reader config A__ = span_hidden_size A__ = max_span_width A__ = reader_layer_norm_eps A__ = reader_beam_size A__ = reader_seq_len # Retrieval config A__ = num_block_records A__ = searcher_beam_size
7
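# --- Example ------------------------------------------------------------------
# Minimal instantiation of the config above; only the overridden fields differ
# from the defaults, and the values here are illustrative.
from transformers import RealmConfig

config = RealmConfig(num_candidates=4, retriever_proj_size=128)
print(config.num_candidates, config.reader_beam_size, config.searcher_beam_size)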
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase_ = logging.get_logger(__name__) lowercase_ = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class A ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'resnet' lowerCamelCase = ['basic', 'bottleneck'] def __init__( self : Optional[Any],lowercase_ : int=3,lowercase_ : List[str]=6_4,lowercase_ : int=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8],lowercase_ : Tuple=[3, 4, 6, 3],lowercase_ : Union[str, Any]="bottleneck",lowercase_ : List[str]="relu",lowercase_ : Tuple=False,lowercase_ : List[str]=None,lowercase_ : List[Any]=None,**lowercase_ : str,)-> Optional[Any]: '''simple docstring''' super().__init__(**lowercase_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) A__ = num_channels A__ = embedding_size A__ = hidden_sizes A__ = depths A__ = layer_type A__ = hidden_act A__ = downsample_in_first_stage A__ = ['stem'] + [F'stage{idx}' for idx in range(1,len(lowercase_ ) + 1 )] A__ , A__ = get_aligned_output_features_output_indices( out_features=lowercase_,out_indices=lowercase_,stage_names=self.stage_names ) class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = version.parse('1.11' ) @property def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def snake_case__ ( self : Any )-> float: '''simple docstring''' return 1E-3
7
1
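# --- Example ------------------------------------------------------------------
# The backbone plumbing above in action: `out_features` selects which of the
# named stages the backbone reports, validated against `stage_names` by
# get_aligned_output_features_output_indices.
from transformers import ResNetConfig

config = ResNetConfig(out_features=["stage2", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage4']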
import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = TransfoXLTokenizer lowerCamelCase = False lowerCamelCase = False def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' super().setUp() A__ = [ '<unk>', '[CLS]', '[SEP]', 'want', 'unwanted', 'wa', 'un', 'running', ',', 'low', 'l', ] A__ = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file,'w',encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def snake_case__ ( self : str,**lowercase_ : str )-> Optional[int]: '''simple docstring''' A__ = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname,**lowercase_ ) def snake_case__ ( self : Any,lowercase_ : Optional[int] )-> int: '''simple docstring''' A__ = '<unk> UNwanted , running' A__ = '<unk> unwanted, running' return input_text, output_text def snake_case__ ( self : List[str] )-> Tuple: '''simple docstring''' A__ = TransfoXLTokenizer(vocab_file=self.vocab_file,lower_case=lowercase_ ) A__ = tokenizer.tokenize('<unk> UNwanted , running' ) self.assertListEqual(lowercase_,['<unk>', 'unwanted', ',', 'running'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[0, 4, 8, 7] ) def snake_case__ ( self : List[Any] )-> Union[str, Any]: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ),['hello', '!', 'how', 'are', 'you', '?'] ) def snake_case__ ( self : Tuple )-> Optional[int]: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ),['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def snake_case__ ( self : Any )-> str: '''simple docstring''' A__ = TransfoXLTokenizer(lower_case=lowercase_ ) A__ = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?' A__ = [ 'Hello', '(', 'bracket', ')', 'and', 'side', '@-@', 'scrolled', '[', 'and', ']', 'Henry', '\'s', '$', '5', '@,@', '000', 'with', '3', '@.@', '34', 'm', '.', 'What', '\'s', 'up', '!', '?', ] self.assertListEqual(tokenizer.tokenize(lowercase_ ),lowercase_ ) self.assertEqual(tokenizer.convert_tokens_to_string(lowercase_ ),lowercase_ ) def snake_case__ ( self : str )-> str: '''simple docstring''' A__ = self.get_tokenizer() A__ = len(lowercase_ ) tokenizer.add_tokens(['new1', 'new2'] ) tokenizer.move_added_token('new1',1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(lowercase_ ),original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('new1' ),[1] ) self.assertEqual(tokenizer.decode([1] ),'new1' )
7
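# --- Example ------------------------------------------------------------------
# The `move_added_token` behaviour checked in the last test, against the public
# checkpoint instead of the fixture vocab (downloads the vocabulary on first use).
from transformers import TransfoXLTokenizer

tokenizer = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
tokenizer.add_tokens(["new1", "new2"])
tokenizer.move_added_token("new1", 1)
print(tokenizer.encode("new1"), tokenizer.decode([1]))  # [1] "new1"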
from typing import Dict from .base import GenericTensor, Pipeline class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : int,lowercase_ : Dict=None,lowercase_ : Tuple=None,lowercase_ : List[Any]=None,**lowercase_ : Any )-> Optional[Any]: '''simple docstring''' if tokenize_kwargs is None: A__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) A__ = truncation A__ = tokenize_kwargs A__ = {} if return_tensors is not None: A__ = return_tensors return preprocess_params, {}, postprocess_params def snake_case__ ( self : Dict,lowercase_ : List[Any],**lowercase_ : Tuple )-> Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework A__ = self.tokenizer(lowercase_,return_tensors=lowercase_,**lowercase_ ) return model_inputs def snake_case__ ( self : Tuple,lowercase_ : int )-> Optional[Any]: '''simple docstring''' A__ = self.model(**lowercase_ ) return model_outputs def snake_case__ ( self : Tuple,lowercase_ : Tuple,lowercase_ : List[str]=False )-> Any: '''simple docstring''' if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[Any],*lowercase_ : int,**lowercase_ : Optional[Any] )-> int: '''simple docstring''' return super().__call__(*lowercase_,**lowercase_ )
7
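# --- Example ------------------------------------------------------------------
# Typical use of the pipeline class above via the factory; the model name is
# illustrative. With the default list output there is one hidden-state vector
# per input token.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test", truncation=True)
print(len(features[0]), len(features[0][0]))  # seq_len, hidden_size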
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack; caches results in the global table `f`."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # item i belongs to an optimal subset at capacity j exactly when the optimal
    # value at (i, j) differs from the optimal value at (i - 1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
7
1
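# --- Example ------------------------------------------------------------------
# The 0/1 recurrence above on a table small enough to check by hand, using the
# knapsack function defined in the listing. This is the same instance as the
# module's __main__ block, where items 3 and 4 (weights 2 + 3 <= 6, values
# 4 + 4) are optimal.
values, weights, capacity = [3, 2, 4, 4], [4, 3, 2, 3], 6
best, table = knapsack(capacity, weights, values, len(values))
print(best)  # 8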
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
7
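# --- Example ------------------------------------------------------------------
# Minimal instantiation of the config above. FNet replaces self-attention with
# Fourier mixing, and the two TPU fields gate a padded fast path; the values
# here are illustrative.
from transformers import FNetConfig

config = FNetConfig(use_tpu_fourier_optimizations=True, tpu_short_seq_length=512)
print(config.hidden_size, config.num_hidden_layers)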
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
7
1
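# --- Example ------------------------------------------------------------------
# The sequence-pair layout asserted above ([CLS] A [SEP] B [SEP]) through the
# public API, using the same checkpoint family as the slow integration test.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
encoded = tokenizer("sequence builders", "multi-sequence build")
print(tokenizer.convert_ids_to_tokens(encoded.input_ids))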
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> Optional[int]: '''simple docstring''' try: A__ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. A__ = default else: # KEY is set, convert it to True or False. try: A__ = strtobool(SCREAMING_SNAKE_CASE__ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value lowercase_ = parse_flag_from_env("RUN_SLOW", default=False) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple: '''simple docstring''' return unittest.skip('Test was skipped' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(_run_slow_tests , 'test is slow' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Dict: '''simple docstring''' return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict: '''simple docstring''' return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] ) -> str: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[str]: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> int: '''simple docstring''' return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ 
: int ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: '''simple docstring''' return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]: '''simple docstring''' return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> List[str]: '''simple docstring''' if test_case is None: return partial(SCREAMING_SNAKE_CASE__ , version=SCREAMING_SNAKE_CASE__ ) return unittest.skipUnless(is_torch_version('>=' , SCREAMING_SNAKE_CASE__ ) , f'test requires torch version >= {version}' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Any: '''simple docstring''' return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]: '''simple docstring''' return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: '''simple docstring''' return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(SCREAMING_SNAKE_CASE__ ) lowercase_ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def _snake_case( SCREAMING_SNAKE_CASE__ : str ) -> int: '''simple docstring''' return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(SCREAMING_SNAKE_CASE__ ) class A ( unittest.TestCase ): """simple docstring""" lowerCamelCase = True @classmethod def snake_case__ ( cls : List[str] )-> List[str]: '''simple docstring''' A__ = tempfile.mkdtemp() @classmethod def snake_case__ ( cls : int )-> str: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowercase_ ) class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : str )-> Optional[int]: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Dict,lowercase_ : Union[mock.Mock, List[mock.Mock]] )-> str: '''simple docstring''' A__ = mocks if isinstance(lowercase_,(tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any: '''simple docstring''' A__ = AcceleratorState() A__ = tensor[None].clone().to(state.device ) A__ = gather(SCREAMING_SNAKE_CASE__ ).cpu() A__ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , SCREAMING_SNAKE_CASE__ ): return False return True class A : """simple docstring""" def __init__( self : Tuple,lowercase_ : Tuple,lowercase_ : Dict,lowercase_ : str )-> str: '''simple docstring''' A__ = returncode A__ = stdout A__ = stderr async def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]: '''simple docstring''' while True: A__ = await stream.readline() if line: callback(SCREAMING_SNAKE_CASE__ ) else: break async def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False ) -> _RunOutput: '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(SCREAMING_SNAKE_CASE__ ) ) A__ = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE__ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE__ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) A__ = [] A__ = [] def tee(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict="" ): A__ = line.decode('utf-8' ).rstrip() sink.append(SCREAMING_SNAKE_CASE__ ) if not quiet: print(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , file=SCREAMING_SNAKE_CASE__ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda SCREAMING_SNAKE_CASE__ : tee(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda SCREAMING_SNAKE_CASE__ : tee(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , sys.stderr , label='stderr:' ) ) ), ] , timeout=SCREAMING_SNAKE_CASE__ , ) return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : List[str]=180 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=True ) -> _RunOutput: '''simple docstring''' A__ = asyncio.get_event_loop() A__ = loop.run_until_complete( _stream_subprocess(SCREAMING_SNAKE_CASE__ , env=SCREAMING_SNAKE_CASE__ , stdin=SCREAMING_SNAKE_CASE__ , timeout=SCREAMING_SNAKE_CASE__ , quiet=SCREAMING_SNAKE_CASE__ , echo=SCREAMING_SNAKE_CASE__ ) ) A__ = ' '.join(SCREAMING_SNAKE_CASE__ ) if result.returncode > 0: A__ = '\n'.join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) return result class A ( _UpperCAmelCase ): """simple docstring""" pass def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=False ) -> List[str]: '''simple docstring''' try: A__ = subprocess.check_output(SCREAMING_SNAKE_CASE__ , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(SCREAMING_SNAKE_CASE__ , 'decode' ): A__ = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'Command `{" ".join(SCREAMING_SNAKE_CASE__ )}` failed with the following error:\n\n{e.output.decode()}' ) from e
7
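A minimal sketch of how the environment-flag parsing and skip decorators above compose in practice; standard library only, with helper names that mirror the ones defined above (the test class is illustrative):

import os
import unittest
from distutils.util import strtobool


def parse_flag_from_env(key, default=False):
    # Missing key -> default; otherwise "yes"/"no"/"1"/"0" -> True/False.
    try:
        return bool(strtobool(os.environ[key]))
    except KeyError:
        return default


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)


def slow(test_case):
    # Decorator: run the test only when RUN_SLOW is truthy in the environment.
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


class ExampleTest(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertTrue(True)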
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the raw hidden states of the model as features for a given text."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, typically the last hidden state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
7
1
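A usage sketch for the feature-extraction pipeline above; the checkpoint name is purely illustrative, not prescribed by the file:

from transformers import pipeline

extractor = pipeline(task="feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a simple test.")
# `features` is a nested list shaped [1, sequence_length, hidden_size]
print(len(features[0]), len(features[0][0]))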
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class A ( _UpperCAmelCase ): """simple docstring""" lowerCamelCase = 'blenderbot-small' lowerCamelCase = ['past_key_values'] lowerCamelCase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any],lowercase_ : List[str]=5_0_2_6_5,lowercase_ : List[str]=5_1_2,lowercase_ : str=8,lowercase_ : Any=2_0_4_8,lowercase_ : List[Any]=1_6,lowercase_ : Tuple=8,lowercase_ : Dict=2_0_4_8,lowercase_ : int=1_6,lowercase_ : Optional[int]=0.0,lowercase_ : str=0.0,lowercase_ : Optional[Any]=True,lowercase_ : List[Any]=True,lowercase_ : Optional[int]="gelu",lowercase_ : Tuple=5_1_2,lowercase_ : Union[str, Any]=0.1,lowercase_ : List[str]=0.0,lowercase_ : str=0.0,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1,lowercase_ : List[Any]=False,lowercase_ : List[str]=0,lowercase_ : Tuple=1,lowercase_ : List[Any]=2,lowercase_ : Dict=2,**lowercase_ : str,)-> str: '''simple docstring''' A__ = vocab_size A__ = max_position_embeddings A__ = d_model A__ = encoder_ffn_dim A__ = encoder_layers A__ = encoder_attention_heads A__ = decoder_ffn_dim A__ = decoder_layers A__ = decoder_attention_heads A__ = dropout A__ = attention_dropout A__ = activation_dropout A__ = activation_function A__ = init_std A__ = encoder_layerdrop A__ = decoder_layerdrop A__ = use_cache A__ = encoder_layers A__ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,decoder_start_token_id=lowercase_,forced_eos_token_id=lowercase_,**lowercase_,) class A ( _UpperCAmelCase ): """simple docstring""" @property def snake_case__ ( self : int )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: A__ = {0: 'batch'} A__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: A__ = {0: 'batch', 1: 'decoder_sequence'} A__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_,direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. 
A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: A__ , A__ = self.num_layers for i in range(lowercase_ ): A__ = {0: 'batch', 2: 'past_sequence + sequence'} A__ = {0: 'batch', 2: 'past_sequence + sequence'} else: A__ = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = super().outputs else: A__ = super(lowercase_,self ).outputs if self.use_past: A__ , A__ = self.num_layers for i in range(lowercase_ ): A__ = {0: 'batch', 2: 'past_sequence + sequence'} A__ = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def snake_case__ ( self : Tuple,lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) # Generate decoder inputs A__ = seq_length if not self.use_past else 1 A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) A__ = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} A__ = dict(**lowercase_,**lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch A__ , A__ = common_inputs['input_ids'].shape A__ = common_inputs['decoder_input_ids'].shape[1] A__ , A__ = self.num_attention_heads A__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) A__ = decoder_seq_length + 3 A__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) A__ = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowercase_,lowercase_ )],dim=1 ) A__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered A__ , A__ = self.num_layers A__ = min(lowercase_,lowercase_ ) A__ = max(lowercase_,lowercase_ ) - min_num_layers A__ = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. 
A__ = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowercase_,lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def snake_case__ ( self : str,lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_,lowercase_,lowercase_,lowercase_,lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch A__ , A__ = common_inputs['input_ids'].shape # Not using the same length for past_key_values A__ = seqlen + 2 A__ , A__ = self.num_layers A__ , A__ = self.num_attention_heads A__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) A__ = common_inputs['attention_mask'].dtype A__ = torch.cat( [common_inputs['attention_mask'], torch.ones(lowercase_,lowercase_,dtype=lowercase_ )],dim=1 ) A__ = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def snake_case__ ( self : List[Any],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' A__ = compute_effective_axis_dimension( lowercase_,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX A__ = tokenizer.num_special_tokens_to_add(lowercase_ ) A__ = compute_effective_axis_dimension( lowercase_,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence A__ = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size A__ = dict(tokenizer(lowercase_,return_tensors=lowercase_ ) ) return common_inputs def snake_case__ ( self : Union[str, Any],lowercase_ : PreTrainedTokenizer,lowercase_ : int = -1,lowercase_ : int = -1,lowercase_ : bool = False,lowercase_ : Optional[TensorType] = None,)-> Mapping[str, Any]: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ ) elif self.task == "causal-lm": A__ = self._generate_dummy_inputs_for_causal_lm( lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ ) else: A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_,batch_size=lowercase_,seq_length=lowercase_,is_pair=lowercase_,framework=lowercase_ ) return common_inputs def snake_case__ ( self : str,lowercase_ : Any,lowercase_ : List[Any],lowercase_ : List[str],lowercase_ : List[str] )-> Any: '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: A__ = super()._flatten_past_key_values_(lowercase_,lowercase_,lowercase_,lowercase_ ) else: A__ = super(lowercase_,self )._flatten_past_key_values_( lowercase_,lowercase_,lowercase_,lowercase_ )
7
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of a non-negative integer with Brian Kernighan's trick."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1  # clears the lowest set bit
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of a non-negative integer by testing each bit in turn."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code comparing the two functions on integers of different lengths."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
7
1
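A quick cross-check sketch for the two counters above against Python's own popcount (bin(n).count("1") works on every supported version; int.bit_count would need Python >= 3.10):

for n in (0, 1, 25, 37, 58, 2**31 - 1):
    kernighan = get_set_bits_count_using_brian_kernighans_algorithm(n)
    modulo = get_set_bits_count_using_modulo_operator(n)
    assert kernighan == modulo == bin(n).count("1"), n
print("all three counters agree")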
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class A ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = AlbertTokenizer lowerCamelCase = AlbertTokenizerFast lowerCamelCase = True lowerCamelCase = True lowerCamelCase = True def snake_case__ ( self : Dict )-> Any: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ = AlbertTokenizer(lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__ ( self : List[str],lowercase_ : str )-> Any: '''simple docstring''' A__ = 'this is a test' A__ = 'this is a test' return input_text, output_text def snake_case__ ( self : List[Any] )-> Optional[int]: '''simple docstring''' A__ = '<pad>' A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ),lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ),lowercase_ ) def snake_case__ ( self : List[str] )-> str: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'<pad>' ) self.assertEqual(vocab_keys[1],'<unk>' ) self.assertEqual(vocab_keys[-1],'▁eloquent' ) self.assertEqual(len(lowercase_ ),3_0_0_0_0 ) def snake_case__ ( self : int )-> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size,3_0_0_0_0 ) def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = 'I was born in 92000, and this is falsé.' A__ = tokenizer.tokenize(lowercase_ ) A__ = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) A__ = rust_tokenizer.encode(lowercase_,add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) A__ = self.get_rust_tokenizer() A__ = tokenizer.encode(lowercase_ ) A__ = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_,lowercase_ ) def snake_case__ ( self : int )-> int: '''simple docstring''' A__ = AlbertTokenizer(lowercase_,keep_accents=lowercase_ ) A__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowercase_,['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ),[4_8, 2_5, 2_1, 1_2_8_9] ) A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) A__ = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual(lowercase_,[3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) A__ = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_,['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'],) def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' A__ = AlbertTokenizer(lowercase_ ) A__ = tokenizer.encode('sequence builders' ) A__ = tokenizer.encode('multi-sequence build' ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_ ) A__ = tokenizer.build_inputs_with_special_tokens(lowercase_,lowercase_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def snake_case__ ( self : Any )-> Tuple: '''simple docstring''' A__ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_,model_name='albert-base-v2',revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',)
7
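A small sketch of the special-token layout the ALBERT test above asserts; the ids here are made up, real ones come from the tokenizer:

def build_inputs_with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    # Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


print(build_inputs_with_special_tokens(2, 3, [10, 11]))        # [2, 10, 11, 3]
print(build_inputs_with_special_tokens(2, 3, [10, 11], [12]))  # [2, 10, 11, 3, 12, 3]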
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> int: '''simple docstring''' A__ = 384 A__ = 7 if "tiny" in model_name: A__ = 96 A__ = (2, 2, 6, 2) A__ = (3, 6, 12, 24) elif "small" in model_name: A__ = 96 A__ = (2, 2, 18, 2) A__ = (3, 6, 12, 24) elif "base" in model_name: A__ = 128 A__ = (2, 2, 18, 2) A__ = (4, 8, 16, 32) A__ = 12 A__ = 512 elif "large" in model_name: A__ = 192 A__ = (2, 2, 18, 2) A__ = (6, 12, 24, 48) A__ = 12 A__ = 768 # set label information A__ = 150 A__ = 'huggingface/label-files' A__ = 'ade20k-id2label.json' A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) ) A__ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} A__ = {v: k for k, v in idalabel.items()} A__ = SwinConfig( embed_dim=SCREAMING_SNAKE_CASE__ , depths=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , window_size=SCREAMING_SNAKE_CASE__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) A__ = UperNetConfig( backbone_config=SCREAMING_SNAKE_CASE__ , auxiliary_in_channels=SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ , ) return config def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = [] # fmt: off # stem rename_keys.append(('backbone.patch_embed.projection.weight', 'backbone.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.projection.bias', 'backbone.embeddings.patch_embeddings.projection.bias') ) rename_keys.append(('backbone.patch_embed.norm.weight', 'backbone.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'backbone.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', 
f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') ) rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') ) if i < 3: rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') ) rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') ) rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: '''simple docstring''' A__ = dct.pop(SCREAMING_SNAKE_CASE__ ) A__ = val def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: '''simple docstring''' A__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): A__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' ) A__ = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:dim, :] A__ = in_proj_bias[: dim] A__ = in_proj_weight[ dim : dim * 2, : ] A__ = in_proj_bias[ dim : dim * 2 ] A__ = in_proj_weight[ -dim :, : ] A__ = in_proj_bias[-dim :] # fmt: on def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , 4 , in_channel // 4 ) A__ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: '''simple docstring''' A__ , A__ = x.shape A__ = x.reshape(SCREAMING_SNAKE_CASE__ , in_channel // 4 , 4 ) A__ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(4 , in_channel // 4 ) A__ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : Any ) -> List[Any]: '''simple docstring''' A__ = x.shape[0] A__ = x.reshape(in_channel // 4 , 4 ) A__ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(SCREAMING_SNAKE_CASE__ ) return x def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Union[str, 
Any]: '''simple docstring''' A__ = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } A__ = model_name_to_url[model_name] A__ = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' , file_name=SCREAMING_SNAKE_CASE__ )[ 'state_dict' ] for name, param in state_dict.items(): print(SCREAMING_SNAKE_CASE__ , param.shape ) A__ = get_upernet_config(SCREAMING_SNAKE_CASE__ ) A__ = UperNetForSemanticSegmentation(SCREAMING_SNAKE_CASE__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A__ = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "bn" in key: A__ = key.replace('bn' , 'batch_norm' ) A__ = val # rename keys A__ = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) read_in_q_k_v(SCREAMING_SNAKE_CASE__ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: A__ = reverse_correct_unfold_reduction_order(SCREAMING_SNAKE_CASE__ ) if "norm" in key: A__ = reverse_correct_unfold_norm_order(SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) # verify on image A__ = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A__ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('RGB' ) A__ = SegformerImageProcessor() A__ = processor(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits print(logits.shape ) print('First values of logits:' , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": A__ = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ) elif model_name == "upernet-swin-small": A__ = torch.tensor( [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] ) elif model_name == "upernet-swin-base": A__ = torch.tensor( [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] ) elif model_name == "upernet-swin-large": A__ = torch.tensor( [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: print(f'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(f'openmmlab/{model_name}' ) processor.push_to_hub(f'openmmlab/{model_name}' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
7
1
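A sanity-check sketch for the patch-merging weight reorder used in the conversion above: a standalone copy of the same transform applied to a toy tensor, showing it is a pure permutation of the input columns:

import torch


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


x = torch.arange(8).reshape(1, 8)  # one output row, in_channel = 8
print(reverse_correct_unfold_reduction_order(x))
# tensor([[0, 4, 2, 6, 1, 5, 3, 7]]) -- the columns are shuffled, never changed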
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
7
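The parser above is what backs the installed `datasets-cli` console script; a hedged in-process sketch of the same entry point, assuming the module above is importable and using its `env` subcommand:

import sys

sys.argv = ["datasets-cli", "env"]
main()  # prints info about the local datasets installation and Python environment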
import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed lowercase_ = "true" def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]: '''simple docstring''' set_seed(42 ) A__ = RegressionModel() A__ = deepcopy(SCREAMING_SNAKE_CASE__ ) A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ ) A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) model.to(accelerator.device ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model, ddp_model, dataloader def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int: '''simple docstring''' A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) A__ = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ): A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs with accelerator.main_process_first(): A__ = dataset.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) A__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ): if use_longest: return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 ) def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str: '''simple docstring''' A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ ) A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches ) A__ = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: '''simple docstring''' A__ = [] for batch in dataloader: A__ , A__ = batch.values() with torch.no_grad(): A__ = model(SCREAMING_SNAKE_CASE__ ) A__ , A__ = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) A__ , A__ = [], [] for logit, targ in logits_and_targets: logits.append(SCREAMING_SNAKE_CASE__ ) targs.append(SCREAMING_SNAKE_CASE__ ) A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ ) return logits, targs def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]: 
'''simple docstring''' A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert ( len(SCREAMING_SNAKE_CASE__ ) == num_samples ), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}' def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str: '''simple docstring''' A__ = evaluate.load('glue' , 'mrpc' ) A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # First do baseline A__ , A__ , A__ = setup['no'] model.to(SCREAMING_SNAKE_CASE__ ) model.eval() for batch in dataloader: batch.to(SCREAMING_SNAKE_CASE__ ) with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] ) A__ = metric.compute() # Then do distributed A__ , A__ , A__ = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): A__ = model(**SCREAMING_SNAKE_CASE__ ) A__ = outputs.logits.argmax(dim=-1 ) A__ = batch['labels'] A__ , A__ = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) A__ = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n' def _snake_case( ) -> Optional[Any]: '''simple docstring''' A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' ) test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ ) if accelerator.is_local_main_process: print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' ) test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) A__ = Accelerator() test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 ) accelerator.state._reset_state() def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]: '''simple docstring''' main() if __name__ == "__main__": main()
7
1
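The key API the script above exercises is Accelerator.gather_for_metrics, which gathers per-process tensors and drops the samples duplicated to pad the last batch. A minimal sketch of the call pattern, assuming `model` and `dataloader` already exist:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model, dataloader = accelerator.prepare(model, dataloader)  # assumes both already exist

model.eval()
all_preds, all_targets = [], []
for batch in dataloader:
    with torch.inference_mode():
        logits = model(**batch).logits
    preds = logits.argmax(dim=-1)
    # Gathers across processes and drops the duplicated padding samples.
    preds, targets = accelerator.gather_for_metrics((preds, batch["labels"]))
    all_preds.append(preds)
    all_targets.append(targets)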
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase = StableDiffusionXLImgaImgPipeline lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'} lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'} lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def snake_case__ ( self : Union[str, Any] )-> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) A__ = UNetaDConditionModel( block_out_channels=(3_2, 6_4),layers_per_block=2,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),attention_head_dim=(2, 4),use_linear_projection=lowercase_,addition_embed_type='text_time',addition_time_embed_dim=8,transformer_layers_per_block=(1, 2),projection_class_embeddings_input_dim=8_0,cross_attention_dim=6_4,) A__ = EulerDiscreteScheduler( beta_start=0.00_085,beta_end=0.012,steps_offset=1,beta_schedule='scaled_linear',timestep_spacing='leading',) torch.manual_seed(0 ) A__ = AutoencoderKL( block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,) torch.manual_seed(0 ) A__ = CLIPTextConfig( bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=3_2,) A__ = CLIPTextModel(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip',local_files_only=lowercase_ ) A__ = CLIPTextModelWithProjection(lowercase_ ) A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip',local_files_only=lowercase_ ) A__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'text_encoder_2': text_encoder_a, 'tokenizer_2': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def snake_case__ ( self : List[str],lowercase_ : Optional[Any],lowercase_ : str=0 )-> Optional[int]: '''simple docstring''' A__ = floats_tensor((1, 3, 3_2, 3_2),rng=random.Random(lowercase_ ) ).to(lowercase_ ) A__ = image / 2 + 0.5 if str(lowercase_ ).startswith('mps' ): A__ = torch.manual_seed(lowercase_ ) else: A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 5.0, 'output_type': 'numpy', 'strength': 0.75, } return inputs def snake_case__ ( self : Union[str, Any] )-> str: '''simple docstring''' 
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator A__ = self.get_dummy_components() A__ = StableDiffusionXLImgaImgPipeline(**lowercase_ ) A__ = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_dummy_inputs(lowercase_ ) A__ = sd_pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) A__ = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def snake_case__ ( self : List[Any] )-> str: '''simple docstring''' super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def snake_case__ ( self : Tuple )-> Dict: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def snake_case__ ( self : int )-> Tuple: '''simple docstring''' pass def snake_case__ ( self : List[str] )-> Tuple: '''simple docstring''' A__ = self.get_dummy_components() A__ = StableDiffusionXLImgaImgPipeline(**lowercase_ ) A__ = sd_pipe.to(lowercase_ ) A__ = sd_pipe.to(lowercase_ ) sd_pipe.set_progress_bar_config(disable=lowercase_ ) # forward without prompt embeds A__ = self.get_dummy_inputs(lowercase_ ) A__ = 3 * ['this is a negative prompt'] A__ = negative_prompt A__ = 3 * [inputs['prompt']] A__ = sd_pipe(**lowercase_ ) A__ = output.images[0, -3:, -3:, -1] # forward with prompt embeds A__ = self.get_dummy_inputs(lowercase_ ) A__ = 3 * ['this is a negative prompt'] A__ = 3 * [inputs.pop('prompt' )] ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = sd_pipe.encode_prompt(lowercase_,negative_prompt=lowercase_ ) A__ = sd_pipe( **lowercase_,prompt_embeds=lowercase_,negative_prompt_embeds=lowercase_,pooled_prompt_embeds=lowercase_,negative_pooled_prompt_embeds=lowercase_,) A__ = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @slow @require_torch_gpu class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Union[str, Any] )-> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case__ ( self : Optional[Any],lowercase_ : Union[str, Any],lowercase_ : Any="cpu",lowercase_ : str=torch.floataa,lowercase_ : str=0 )-> List[str]: '''simple docstring''' A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ ) A__ = np.random.RandomState(lowercase_ ).standard_normal((1, 4, 6_4, 6_4) ) A__ = torch.from_numpy(lowercase_ ).to(device=lowercase_,dtype=lowercase_ ) A__ = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def snake_case__ ( self : List[Any] )-> Dict: '''simple docstring''' A__ = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base' ) pipe.to(lowercase_ ) pipe.set_progress_bar_config(disable=lowercase_ ) A__ = self.get_inputs(lowercase_ ) A__ = pipe(**lowercase_ ).images A__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) A__ = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506] ) assert np.abs(image_slice - expected_slice ).max() < 7E-3
7
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending sorted collection; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid dividing by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; the caller supplies the initial `left` and `right` bounds."""
    # avoid dividing by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    if point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    if point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    if sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
7
1
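A usage sketch for the two search variants defined above:

collection = [5, 10, 12, 14, 17, 20, 21]
print(interpolation_search(collection, 20))                     # 5
print(interpolation_search_by_recursion(collection, 20, 0, 6))  # 5
print(interpolation_search(collection, 18))                     # None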
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class A :
    """simple docstring"""

    def __init__( self : int,lowercase_ : List[str],lowercase_ : List[str]=1_3,lowercase_ : Tuple=7,lowercase_ : Any=False,lowercase_ : Optional[int]=True,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : Tuple=3_3,lowercase_ : Any=3_2,lowercase_ : int=5,lowercase_ : Optional[Any]=4,lowercase_ : int=3_7,lowercase_ : Tuple="gelu",lowercase_ : List[Any]=0.1,lowercase_ : Tuple=0.1,lowercase_ : Any=5_1_2,lowercase_ : Any=1_6,lowercase_ : List[str]=2,lowercase_ : Any=0.02,lowercase_ : Union[str, Any]=3,lowercase_ : str=4,lowercase_ : int=None,)-> Optional[int]:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_input_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = num_labels
        A__ = num_choices
        A__ = scope

    def snake_case__ ( self : Tuple )-> Tuple:
        '''simple docstring'''
        A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )

        A__ = None
        if self.use_input_mask:
            A__ = random_attention_mask([self.batch_size, self.seq_length] )

        A__ = None
        A__ = None
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
            A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
            A__ = ids_tensor([self.batch_size],self.num_choices )

        A__ = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def snake_case__ ( self : Dict )-> Any:
        '''simple docstring'''
        return EsmConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,pad_token_id=1,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,)

    def snake_case__ ( self : Any,lowercase_ : Tuple,lowercase_ : List[str],lowercase_ : Union[str, Any],lowercase_ : List[str],lowercase_ : Tuple,lowercase_ : str )-> str:
        '''simple docstring'''
        A__ = EsmModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        A__ = model(lowercase_ )
        A__ = model(lowercase_ )

        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.hidden_size) )

    def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : Dict,lowercase_ : Dict,lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Any )-> List[Any]:
        '''simple docstring'''
        A__ = EsmForMaskedLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case__ ( self : Tuple,lowercase_ : str,lowercase_ : Dict,lowercase_ : List[str],lowercase_ : Any,lowercase_ : str,lowercase_ : str )-> List[Any]:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = EsmForTokenClassification(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self : int )-> List[Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        (
            (A__) ,
            (A__) ,
            (A__) ,
            (A__) ,
            (A__) ,
            (A__) ,
        ) = config_and_inputs
        A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = False
    lowerCamelCase = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase = ()
    lowerCamelCase = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase = True

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        A__ = EsmModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )

    def snake_case__ ( self : Optional[int] )-> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def snake_case__ ( self : Optional[int] )-> Any:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def snake_case__ ( self : Tuple )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            A__ = type
            self.model_tester.create_and_check_model(*lowercase_ )

    def snake_case__ ( self : Union[str, Any] )-> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase_ )

    def snake_case__ ( self : List[str] )-> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_ )

    @slow
    def snake_case__ ( self : Tuple )-> List[str]:
        '''simple docstring'''
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ = EsmModel.from_pretrained(lowercase_ )
            self.assertIsNotNone(lowercase_ )

    def snake_case__ ( self : Tuple )-> List[str]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()[0]
        A__ = EsmEmbeddings(config=lowercase_ )

        A__ = torch.as_tensor([[1_2, 3_1, 1_3, model.padding_idx]] )
        A__ = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )

        A__ = create_position_ids_from_input_ids(lowercase_,model.padding_idx )
        self.assertEqual(position_ids.shape,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(lowercase_,lowercase_ ) ) )

    def snake_case__ ( self : Optional[Any] )-> List[str]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()[0]
        A__ = EsmEmbeddings(config=lowercase_ )

        A__ = torch.empty(2,4,3_0 )
        A__ = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        A__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
        A__ = embeddings.create_position_ids_from_inputs_embeds(lowercase_ )
        self.assertEqual(position_ids.shape,expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(lowercase_,lowercase_ ) ) )

    @unittest.skip('Esm does not support embedding resizing' )
    def snake_case__ ( self : Optional[Any] )-> Any:
        '''simple docstring'''
        pass

    @unittest.skip('Esm does not support embedding resizing' )
    def snake_case__ ( self : Optional[int] )-> int:
        '''simple docstring'''
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def snake_case__ ( self : Optional[Any] )-> Any:
        '''simple docstring'''
        pass


@require_torch
class A ( _UpperCAmelCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : str )-> int:
        '''simple docstring'''
        with torch.no_grad():
            A__ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            A__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            A__ = model(lowercase_ )[0]

            A__ = 3_3
            A__ = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape,lowercase_ )

            A__ = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) )

    @slow
    def snake_case__ ( self : int )-> Any:
        '''simple docstring'''
        with torch.no_grad():
            A__ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
            model.eval()
            A__ = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
            A__ = model(lowercase_ )[0]
            # compare the actual values for a slice.
            A__ = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3],lowercase_,atol=1E-4 ) )
7
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple ) -> Tuple:
    '''simple docstring'''
    return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}


def _snake_case( ) -> Dict:
    '''simple docstring'''
    A__ = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=SCREAMING_SNAKE_CASE__ )
    A__ = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    TestCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    RunBeamCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )
    DummyDataCommand.register_subcommand(SCREAMING_SNAKE_CASE__ )

    # Parse args
    A__ , A__ = parser.parse_known_args()
    if not hasattr(SCREAMING_SNAKE_CASE__ , 'func' ):
        parser.print_help()
        exit(1 )
    A__ = parse_unknown_args(SCREAMING_SNAKE_CASE__ )

    # Run
    A__ = args.func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    service.run()


if __name__ == "__main__":
    main()
7
1
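The ESM embedding tests in the record above pin down a position-id convention without stating it. Below is a minimal standalone sketch, not part of the dump itself; the function name and the padding value are assumptions for illustration, showing the arithmetic that `create_position_ids_from_input_ids` is expected to satisfy.

import torch


def create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # 1 for real tokens, 0 for padding
    mask = input_ids.ne(padding_idx).int()
    # count real tokens left to right, zero out the padding slots, then shift
    # everything past padding_idx so no real position collides with it
    return torch.cumsum(mask, dim=1) * mask + padding_idx


# mirrors the expectation hard-coded in the test above: with padding_idx = 1,
# [[12, 31, 13, 1]] maps to [[2, 3, 4, 1]]
print(create_position_ids(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))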
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class A :
    """simple docstring"""

    def __init__( self : Union[str, Any],lowercase_ : Any,lowercase_ : Union[str, Any]=1_3,lowercase_ : Tuple=3_0,lowercase_ : List[Any]=2,lowercase_ : Optional[int]=3,lowercase_ : Union[str, Any]=True,lowercase_ : Tuple=True,lowercase_ : Any=3_2,lowercase_ : List[str]=2,lowercase_ : Optional[int]=4,lowercase_ : Union[str, Any]=3_7,lowercase_ : Tuple="gelu",lowercase_ : str=0.1,lowercase_ : Tuple=0.1,lowercase_ : Union[str, Any]=1_0,lowercase_ : int=0.02,lowercase_ : List[Any]=3,lowercase_ : Any=None,)-> Dict:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = is_training
        A__ = use_labels
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        A__ = (image_size // patch_size) ** 2
        A__ = num_patches + 1

    def snake_case__ ( self : int )-> List[str]:
        '''simple docstring'''
        A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )

        A__ = self.get_config()

        return config, pixel_values, labels

    def snake_case__ ( self : Tuple )-> List[Any]:
        '''simple docstring'''
        return ViTConfig(
            image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=lowercase_,initializer_range=self.initializer_range,)

    def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Tuple )-> Optional[Any]:
        '''simple docstring'''
        A__ = TFViTModel(config=lowercase_ )
        A__ = model(lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
        A__ = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )

    def snake_case__ ( self : List[Any],lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : List[Any] )-> Dict:
        '''simple docstring'''
        A__ = self.type_sequence_label_size
        A__ = TFViTForImageClassification(lowercase_ )
        A__ = model(lowercase_,labels=lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        A__ = self.image_size // 2
        A__ = pixel_values[:, :, :image_size, :image_size]
        A__ = model(lowercase_,interpolate_pos_encoding=lowercase_,training=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        A__ = 1
        A__ = TFViTForImageClassification(lowercase_ )
        A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )

    def snake_case__ ( self : Any )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ = config_and_inputs
        A__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    lowerCamelCase = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__ ( self : int )-> List[Any]:
        '''simple docstring'''
        A__ = TFViTModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,has_text_modality=lowercase_,hidden_size=3_7 )

    def snake_case__ ( self : Any )-> Optional[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def snake_case__ ( self : Optional[Any] )-> str:
        '''simple docstring'''
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def snake_case__ ( self : Any )-> int:
        '''simple docstring'''
        pass

    def snake_case__ ( self : str )-> Dict:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
            A__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase_,tf.keras.layers.Layer ) )

    def snake_case__ ( self : int )-> List[str]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A__ = model_class(lowercase_ )
            A__ = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ = [*signature.parameters.keys()]
            A__ = ['pixel_values']
            self.assertListEqual(arg_names[:1],lowercase_ )

    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase_ )

    def snake_case__ ( self : Optional[Any] )-> Optional[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase_ )

    @slow
    def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
        '''simple docstring'''
        A__ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(lowercase_ )


def _snake_case( ) -> str:
    '''simple docstring'''
    A__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
@require_vision
class A ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def snake_case__ ( self : List[Any] )-> str:
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def snake_case__ ( self : Any )-> Dict:
        '''simple docstring'''
        A__ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )

        A__ = self.default_image_processor
        A__ = prepare_img()
        A__ = image_processor(images=lowercase_,return_tensors='tf' )

        # forward pass
        A__ = model(**lowercase_ )

        # verify the logits
        A__ = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape,lowercase_ )

        A__ = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3],lowercase_,atol=1E-4 )
7
7
1
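The TFViT tester above derives every expected shape from one piece of arithmetic: the encoder sees one token per image patch plus one [CLS] token. A small self-contained check of the two sequence lengths the tests assert; this sketch is not part of the dump and the helper name is made up.

def vit_seq_length(image_size: int, patch_size: int) -> int:
    # one token per patch, plus one for the [CLS] token
    return (image_size // patch_size) ** 2 + 1


# tester defaults above: image_size=30, patch_size=2 -> 15 * 15 + 1
assert vit_seq_length(30, 2) == 226
# the interpolation test halves the image: (15 // 2) ** 2 + 1
assert vit_seq_length(15, 2) == 50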
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'timesformer'

    def __init__( self : List[Any],lowercase_ : int=2_2_4,lowercase_ : Optional[Any]=1_6,lowercase_ : Dict=3,lowercase_ : Optional[Any]=8,lowercase_ : str=7_6_8,lowercase_ : List[Any]=1_2,lowercase_ : List[Any]=1_2,lowercase_ : int=3_0_7_2,lowercase_ : Tuple="gelu",lowercase_ : int=0.0,lowercase_ : Any=0.0,lowercase_ : str=0.02,lowercase_ : int=1E-6,lowercase_ : List[str]=True,lowercase_ : Any="divided_space_time",lowercase_ : List[Any]=0,**lowercase_ : Union[str, Any],)-> int:
        '''simple docstring'''
        super().__init__(**lowercase_ )

        A__ = image_size
        A__ = patch_size
        A__ = num_channels
        A__ = num_frames

        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = initializer_range
        A__ = layer_norm_eps
        A__ = qkv_bias

        A__ = attention_type
        A__ = drop_path_rate
7
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class A :
    """simple docstring"""

    def __init__( self : str,lowercase_ : Any,lowercase_ : Tuple=1_3,lowercase_ : str=7,lowercase_ : Tuple=True,lowercase_ : int=True,lowercase_ : List[Any]=True,lowercase_ : List[str]=True,lowercase_ : List[str]=9_9,lowercase_ : List[Any]=6_4,lowercase_ : List[str]=5,lowercase_ : Optional[Any]=4,lowercase_ : Optional[Any]=3_7,lowercase_ : Optional[Any]="gelu",lowercase_ : int=0.1,lowercase_ : str=0.1,lowercase_ : Optional[Any]=5_1_2,lowercase_ : int=1_6,lowercase_ : List[Any]=2,lowercase_ : Union[str, Any]=0.02,lowercase_ : Tuple=3,lowercase_ : List[Any]=4,lowercase_ : str=None,)-> Union[str, Any]:
        '''simple docstring'''
        A__ = parent
        A__ = batch_size
        A__ = seq_length
        A__ = is_training
        A__ = use_input_mask
        A__ = use_token_type_ids
        A__ = use_labels
        A__ = vocab_size
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = max_position_embeddings
        A__ = type_vocab_size
        A__ = type_sequence_label_size
        A__ = initializer_range
        A__ = num_labels
        A__ = num_choices
        A__ = scope
        A__ = vocab_size - 1

    def snake_case__ ( self : str )-> Optional[Any]:
        '''simple docstring'''
        A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )

        A__ = None
        if self.use_input_mask:
            A__ = random_attention_mask([self.batch_size, self.seq_length] )

        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )

        A__ = self.get_config()

        return config, input_ids, input_mask, token_labels

    def snake_case__ ( self : List[Any] )-> Tuple:
        '''simple docstring'''
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,pad_token_id=self.pad_token_id,)

    def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.prepare_config_and_inputs()
        A__ = True
        return config, input_ids, input_mask, token_labels

    def snake_case__ ( self : Any,lowercase_ : List[Any],lowercase_ : List[Any],lowercase_ : str )-> Any:
        '''simple docstring'''
        A__ = GPTNeoXModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        A__ = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case__ ( self : Union[str, Any],lowercase_ : List[str],lowercase_ : Dict,lowercase_ : Optional[Any] )-> Tuple:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )

    def snake_case__ ( self : Union[str, Any],lowercase_ : str,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : List[str] )-> List[str]:
        '''simple docstring'''
        A__ = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )

    def snake_case__ ( self : Optional[int],lowercase_ : Optional[int],lowercase_ : Optional[int],lowercase_ : Dict,lowercase_ : Any )-> int:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForQuestionAnswering(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_ )
        self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )

    def snake_case__ ( self : List[str],lowercase_ : List[str],lowercase_ : int,lowercase_ : Union[str, Any],lowercase_ : Optional[int] )-> str:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )

    def snake_case__ ( self : Any,lowercase_ : Union[str, Any],lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : int )-> Union[str, Any]:
        '''simple docstring'''
        A__ = self.num_labels
        A__ = GPTNeoXForTokenClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        A__ = model(lowercase_,attention_mask=lowercase_,labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )

    def snake_case__ ( self : int,lowercase_ : str,lowercase_ : int,lowercase_ : Union[str, Any] )-> List[Any]:
        '''simple docstring'''
        A__ = True
        A__ = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()

        # first forward pass
        A__ = model(lowercase_,attention_mask=lowercase_,use_cache=lowercase_ )
        A__ = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        A__ = ids_tensor((self.batch_size, 3),config.vocab_size )
        A__ = ids_tensor((self.batch_size, 3),vocab_size=2 )

        # append to next input_ids and
        A__ = torch.cat([input_ids, next_tokens],dim=-1 )
        A__ = torch.cat([input_mask, next_mask],dim=-1 )

        A__ = model(lowercase_,attention_mask=lowercase_,output_hidden_states=lowercase_ )
        A__ = output_from_no_past['hidden_states'][0]
        A__ = model(
            lowercase_,attention_mask=lowercase_,past_key_values=lowercase_,output_hidden_states=lowercase_,)['hidden_states'][0]

        # select random slice
        A__ = ids_tensor((1,),output_from_past.shape[-1] ).item()
        A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        A__ = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-3 ) )

    def snake_case__ ( self : str )-> Union[str, Any]:
        '''simple docstring'''
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ = config_and_inputs
        A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class A ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    lowerCamelCase = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False
    lowerCamelCase = False

    def snake_case__ ( self : str )-> Tuple:
        '''simple docstring'''
        A__ = GPTNeoXModelTester(self )
        A__ = ConfigTester(self,config_class=lowercase_,hidden_size=6_4,num_attention_heads=8 )

    def snake_case__ ( self : Optional[Any] )-> Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def snake_case__ ( self : Union[str, Any] )-> Union[str, Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Dict )-> List[Any]:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : List[str] )-> Any:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
        A__ = None
        self.model_tester.create_and_check_model_as_decoder(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Optional[Any] )-> str:
        '''simple docstring'''
        A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_,lowercase_,lowercase_ )

    def snake_case__ ( self : Dict )-> Dict:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowercase_ )

    def snake_case__ ( self : Tuple )-> List[Any]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_ )

    def snake_case__ ( self : Any )-> List[str]:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )

    def snake_case__ ( self : str )-> Tuple:
        '''simple docstring'''
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_ )

    @unittest.skip(reason='Feed forward chunking is not implemented' )
    def snake_case__ ( self : Union[str, Any] )-> Optional[Any]:
        '''simple docstring'''
        pass

    @parameterized.expand([('linear',), ('dynamic',)] )
    def snake_case__ ( self : List[str],lowercase_ : Any )-> List[str]:
        '''simple docstring'''
        A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
        A__ = ids_tensor([1, 1_0],config.vocab_size )
        A__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )],config.vocab_size )

        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        A__ = GPTNeoXModel(lowercase_ )
        original_model.to(lowercase_ )
        original_model.eval()
        A__ = original_model(lowercase_ ).last_hidden_state
        A__ = original_model(lowercase_ ).last_hidden_state

        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        A__ = {'type': scaling_type, 'factor': 10.0}
        A__ = GPTNeoXModel(lowercase_ )
        scaled_model.to(lowercase_ )
        scaled_model.eval()
        A__ = scaled_model(lowercase_ ).last_hidden_state
        A__ = scaled_model(lowercase_ ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) )


@require_torch
class A ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def snake_case__ ( self : Tuple )-> Union[str, Any]:
        '''simple docstring'''
        A__ = AutoTokenizer.from_pretrained('EleutherAI/pythia-410m-deduped' )
        for checkpointing in [True, False]:
            A__ = GPTNeoXForCausalLM.from_pretrained('EleutherAI/pythia-410m-deduped' )

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowercase_ )

            A__ = tokenizer('My favorite food is',return_tensors='pt' ).to(lowercase_ )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            A__ = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'

            A__ = model.generate(**lowercase_,do_sample=lowercase_,max_new_tokens=2_0 )
            A__ = tokenizer.batch_decode(lowercase_ )[0]

            self.assertEqual(lowercase_,lowercase_ )
7
1
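In the parameterized scaling test above, the obfuscation hides which attribute the `{'type': scaling_type, 'factor': 10.0}` dict is assigned to; in the unobfuscated transformers source it is the config's `rope_scaling` field. A hedged sketch of the intended setup, assuming a transformers version whose GPTNeoXConfig supports RoPE scaling:

from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(hidden_size=64, num_attention_heads=8)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # or "dynamic"
scaled_model = GPTNeoXModel(config).eval()  # RoPE is built from config.rope_scaling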
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class A ( _UpperCAmelCase , unittest.TestCase ):
    """simple docstring"""

    lowerCamelCase = FlaxAutoencoderKL

    @property
    def snake_case__ ( self : Any )-> int:
        '''simple docstring'''
        A__ = 4
        A__ = 3
        A__ = (3_2, 3_2)

        A__ = jax.random.PRNGKey(0 )
        A__ = jax.random.uniform(lowercase_,((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def snake_case__ ( self : Any )-> str:
        '''simple docstring'''
        A__ = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        A__ = self.dummy_input
        return init_dict, inputs_dict
7
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class A ( _UpperCAmelCase ):
    """simple docstring"""

    lowerCamelCase = 'open-llama'

    def __init__( self : Any,lowercase_ : Optional[int]=1_0_0_0_0_0,lowercase_ : Union[str, Any]=4_0_9_6,lowercase_ : Dict=1_1_0_0_8,lowercase_ : Dict=3_2,lowercase_ : Optional[int]=3_2,lowercase_ : Dict="silu",lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Optional[int]=0.02,lowercase_ : Dict=1E-6,lowercase_ : Dict=True,lowercase_ : List[Any]=0,lowercase_ : Optional[int]=1,lowercase_ : str=2,lowercase_ : str=False,lowercase_ : str=True,lowercase_ : int=0.1,lowercase_ : List[Any]=0.1,lowercase_ : List[Any]=True,lowercase_ : Union[str, Any]=True,lowercase_ : Any=None,**lowercase_ : List[Any],)-> Tuple:
        '''simple docstring'''
        A__ = vocab_size
        A__ = max_position_embeddings
        A__ = hidden_size
        A__ = intermediate_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = hidden_act
        A__ = initializer_range
        A__ = rms_norm_eps
        A__ = use_cache
        A__ = kwargs.pop(
            'use_memorry_efficient_attention',lowercase_ )
        A__ = hidden_dropout_prob
        A__ = attention_dropout_prob
        A__ = use_stable_embedding
        A__ = shared_input_output_embedding
        A__ = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_,bos_token_id=lowercase_,eos_token_id=lowercase_,tie_word_embeddings=lowercase_,**lowercase_,)

    def snake_case__ ( self : str )-> str:
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling,lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F'got {self.rope_scaling}' )
        A__ = self.rope_scaling.get('type',lowercase_ )
        A__ = self.rope_scaling.get('factor',lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
        if rope_scaling_factor is None or not isinstance(lowercase_,lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}' )
7
1
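The `_rope_scaling_validation` hook in the last record accepts only a two-field dict with a recognized type and a factor above 1, and it runs inside `__init__`. A quick usage sketch follows; the class name comes from the unobfuscated transformers source, which the dump above renames, so treat the import as an assumption about the installed version.

from transformers import OpenLlamaConfig

OpenLlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})  # passes validation
try:
    OpenLlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
except ValueError as err:
    print(err)  # the type field must be one of ['linear', 'dynamic']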