import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
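# Usage sketch (added for illustration, not part of the original test file):
# running the tool outside the test harness; assumes network access to fetch
# the underlying checkpoint.
if __name__ == "__main__":
    classifier = load_tool("text-classification")
    classifier.setup()
    # Zero-shot style call: pick the best-fitting label for the text.
    print(classifier("That's quite cool", ["positive", "negative"]))  # -> "positive"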
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Compute the circular convolution of two discrete signals."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # row i of the matrix is the second signal rotated right by i positions
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
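if __name__ == "__main__":
    # Worked example (hand-checked, not part of the original file): the circular
    # convolution y[n] = sum_m x[m] * h[(n - m) mod N] of x = [2, 1, 2, -1]
    # with h = [1, 2, 3, 4] is [10, 10, 6, 14], e.g.
    #   y[0] = 2*1 + 1*4 + 2*3 + (-1)*2 = 10.
    print(CircularConvolution().circular_convolution())  # [10.0, 10.0, 6.0, 14.0]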
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) by its Maclaurin series, truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta by whole multiples of 2*pi so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) by its Maclaurin series, truncated at `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
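if __name__ == "__main__":
    # Sanity check (added, not in the original file): with the default 30 terms
    # and the range reduction above, the truncated series agrees with math.sin
    # and math.cos to well under 1e-9.
    from math import cos, sin

    assert abs(maclaurin_sin(10) - sin(10)) < 1e-9  # sin(10) ≈ -0.54402
    assert abs(maclaurin_cos(5) - cos(5)) < 1e-9  # cos(5) ≈ 0.28366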
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
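# Usage sketch (added for illustration; not part of the original module): the
# defaults above reproduce google/pegasus-large's dimensions, and the
# `attribute_map` plus the two properties alias the generic config names.
#
#   config = PegasusConfig()
#   config.hidden_size          # 1024, resolved to config.d_model
#   config.num_attention_heads  # 16, resolved to config.encoder_attention_heads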
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
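# Usage sketch (added for illustration; not part of the original module): a
# full AltCLIP config can be composed from the two sub-configs, mirroring
# `from_text_vision_configs` above.
#
#   text_cfg = AltCLIPTextConfig()      # XLM-R-large-sized text tower
#   vision_cfg = AltCLIPVisionConfig()  # ViT-style vision tower
#   cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.text_config.project_dim == 768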
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
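# Note (added for illustration; not part of the original module): unlike BERT,
# XLNet appends its special tokens, so `build_inputs_with_special_tokens` lays
# out a pair (A, B) as
#   A <sep> B <sep> <cls>
# and `create_token_type_ids_from_sequences` marks those positions 0...0 1...1 2,
# matching the segment ids defined near the top of the file.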
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
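# Minimal standalone sketch of the denoising loop these tests exercise (added
# for illustration; the constant residual stands in for a real noise-prediction
# model, and the sample shape is an arbitrary assumption):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.ones(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       residual = 0.1 * sample
#       sample = scheduler.step(residual, t, sample).prev_sample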
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True


@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        s2s_model.load_state_dict(save_dict["model"])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)


@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)


@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples


def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list


@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)


st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = (
    '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n'
    % (header_html,)
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)

action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None

# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s

if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            s2s_model,
            s2s_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, "  \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_training(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
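# Usage sketch (hedged): with the _LazyModule in place, an import such as
#   from transformers.models.graphormer import GraphormerConfig
# resolves configuration_graphormer only on first attribute access, keeping the
# initial package import cheap.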
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
A_ : Optional[int] = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
A_ : Dict = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
A_ : int = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
A_ : Tuple = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
A_ : int = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
A_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A_ : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
A_ : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
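# Usage sketch (hedged; checkpoint names are illustrative public identifiers):
# each auto class resolves a checkpoint's config type through the mappings
# above, e.g.
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")        # -> FlaxBertModel
#   mlm = FlaxAutoModelForMaskedLM.from_pretrained("roberta-base")  # -> FlaxRobertaForMaskedLM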
| 333 | 1 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A_ : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class A_ ( _a ):
'''simple docstring'''
def __init__(self , *lowercase__ , **lowercase__ ) -> int:
super().__init__(*lowercase__ , **lowercase__ )
requires_backends(self , '''vision''' )
self.check_model_type(lowercase__ )
def __call__(self , lowercase__ , **lowercase__ ) -> Tuple:
return super().__call__(lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
return {}, {}, {}
def lowerCAmelCase_ (self , lowercase__ ) -> List[Any]:
__UpperCAmelCase = load_image(lowercase__ )
__UpperCAmelCase = image.size
__UpperCAmelCase = self.image_processor(images=lowercase__ , return_tensors=self.framework )
return model_inputs
def lowerCAmelCase_ (self , lowercase__ ) -> str:
__UpperCAmelCase = self.model(**lowercase__ )
return model_outputs
def lowerCAmelCase_ (self , lowercase__ ) -> Any:
__UpperCAmelCase = model_outputs.predicted_depth
__UpperCAmelCase = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=lowercase__ )
__UpperCAmelCase = prediction.squeeze().cpu().numpy()
__UpperCAmelCase = (output * 255 / np.max(lowercase__ )).astype('''uint8''' )
__UpperCAmelCase = Image.fromarray(lowercase__ )
__UpperCAmelCase = {}
__UpperCAmelCase = predicted_depth
__UpperCAmelCase = depth
return output_dict
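# Usage sketch (hedged; the checkpoint name is an assumption, any
# depth-estimation checkpoint should work):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"]            # PIL.Image, rescaled to 0-255 as in postprocess above
#   result["predicted_depth"]  # raw torch.Tensor returned by the model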
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
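# Usage sketch (hedged; names follow the public scheduler API this module
# mirrors, and the hyperparameters below are illustrative):
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=100, num_training_steps=1_000
#   )
#   # per training step: loss.backward(); optimizer.step(); lr_scheduler.step()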
| 333 | 1 |
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
while num > 0:
digit_sum += num % 1_0
num //= 1_0
return digit_sum
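# The numerators n_i of the convergents of e's continued fraction
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] satisfy n_i = a_i * n_(i-1) + n_(i-2).
# solution() below iterates that recurrence on the numerators only, using a
# multiplier of 2 * i / 3 when i is a multiple of 3 and 1 otherwise, and
# returns the digit sum of the max_n-th numerator (Project Euler 65; the
# expected result for max_n=100 is 272).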
def __a ( SCREAMING_SNAKE_CASE = 1_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase = 1
__UpperCAmelCase = 2
for i in range(2 , max_n + 1 ):
__UpperCAmelCase = pre_numerator
__UpperCAmelCase = 2 * i // 3 if i % 3 == 0 else 1
__UpperCAmelCase = cur_numerator
__UpperCAmelCase = e_cont * pre_numerator + temp
return sum_digits(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 333 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
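# Example (hedged): for collinear points the interpolating polynomial is the
# line itself, so x_points=[1, 2, 3, 4, 6], y_points=[6, 7, 8, 9, 11] (the line
# y = x + 5) evaluated at xa=5 yields 10.0 as the first returned element; the
# second element is the full Neville table q used to compute it.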
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
import math
import qiskit
def __a ( SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
__UpperCAmelCase = qiskit.QuantumRegister(4 , '''qr''' )
__UpperCAmelCase = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
__UpperCAmelCase = [input_a, input_a, carry_in]
__UpperCAmelCase = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits
__UpperCAmelCase = qiskit.Aer.get_backend('''aer_simulator''' )
__UpperCAmelCase = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1_0_0_0 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 333 |
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
# edges = list of graph's edges
__UpperCAmelCase = get_edges(SCREAMING_SNAKE_CASE )
    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove every edge incident to from_node or to_node
while edges:
__UpperCAmelCase , __UpperCAmelCase = edges.pop()
chosen_vertices.add(SCREAMING_SNAKE_CASE )
chosen_vertices.add(SCREAMING_SNAKE_CASE )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(SCREAMING_SNAKE_CASE )
return chosen_vertices
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ (self ) -> Tuple:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(lowercase__ ):
__UpperCAmelCase = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
__UpperCAmelCase = FlaxAutoModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> List[str]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(lowercase__ ):
__UpperCAmelCase = AutoConfig.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
__UpperCAmelCase = FlaxAutoModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertIsInstance(lowercase__ , lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase__ )
__UpperCAmelCase = FlaxBertModel.from_pretrained(lowercase__ )
__UpperCAmelCase = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowercase__ ):
return model(**lowercase__ )
eval(**lowercase__ ).block_until_ready()
@slow
def lowerCAmelCase_ (self ) -> Dict:
for model_name in ["roberta-base", "roberta-large"]:
__UpperCAmelCase = AutoTokenizer.from_pretrained(lowercase__ )
__UpperCAmelCase = FlaxRobertaModel.from_pretrained(lowercase__ )
__UpperCAmelCase = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**lowercase__ ):
return model(**lowercase__ )
eval(**lowercase__ ).block_until_ready()
def lowerCAmelCase_ (self ) -> List[Any]:
with self.assertRaisesRegex(
lowercase__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__UpperCAmelCase = FlaxAutoModel.from_pretrained('''bert-base''' )
def lowerCAmelCase_ (self ) -> List[str]:
with self.assertRaisesRegex(
lowercase__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__UpperCAmelCase = FlaxAutoModel.from_pretrained(lowercase__ , revision='''aaaaaa''' )
def lowerCAmelCase_ (self ) -> Dict:
with self.assertRaisesRegex(
lowercase__ , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ):
__UpperCAmelCase = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowerCAmelCase_ (self ) -> Tuple:
with self.assertRaisesRegex(lowercase__ , '''Use `from_pt=True` to load this model''' ):
__UpperCAmelCase = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 333 |
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
A_ : int = ['a', 'b', 'c', 'd', 'e']
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE )
    # if not every vertex has been visited, pick an unvisited one and continue
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
A_ : Tuple = topological_sort('a', [], [])
print(sort)
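# Note: `sort` is a DFS post-order, so for the sample DAG above it prints
# ['c', 'd', 'e', 'b', 'a'], the reverse of a topological ordering; reversing
# the list places 'a' before all of its successors.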
| 333 | 1 |
A_ : Dict = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
assert type(SCREAMING_SNAKE_CASE ) in (int, float) and decimal == int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = ''''''
__UpperCAmelCase = False
if decimal < 0:
__UpperCAmelCase = True
decimal *= -1
while decimal > 0:
__UpperCAmelCase , __UpperCAmelCase = divmod(SCREAMING_SNAKE_CASE , 1_6 )
__UpperCAmelCase = values[remainder] + hexadecimal
__UpperCAmelCase = '''0x''' + hexadecimal
if negative:
__UpperCAmelCase = '''-''' + hexadecimal
return hexadecimal
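# Examples of the conversion above (matching the builtin hex() output):
#   26 -> '0x1a', 1000 -> '0x3e8', -26 -> '-0x1a'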
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
'''simple docstring'''
model.train()
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
set_seed(4_2 )
__UpperCAmelCase = RegressionModel()
__UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
    # Prepare everything with the accelerator (model, optimizer and scheduler when used)
if sched:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        # DDP model and model should only be in sync on steps that complete an
        # accumulation window (or on the final batch)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
__UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase = RegressionDataset(length=9_6 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if iteration < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if batch_num < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __a ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
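# Launch sketch (hedged; the script path is illustrative): run this file via
#   accelerate launch --num_processes 2 test_sync.py
# to exercise the MULTI_GPU/MULTI_CPU branches; a plain `python` invocation
# only reaches the DistributedType.NO code paths.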
| 333 | 1 |
def __a ( SCREAMING_SNAKE_CASE = 1_0_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase = 2**power
__UpperCAmelCase = str(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = list(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = 0
for i in list_num:
sum_of_num += int(SCREAMING_SNAKE_CASE )
return sum_of_num
if __name__ == "__main__":
A_ : int = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
A_ : List[Any] = solution(power)
print('Sum of the digits is: ', result)
| 333 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
A_ : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
A_ : Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
A_ : Union[str, Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
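# Note: exiting with the number of offending files makes this script usable as
# a CI gate; any uppercase, space, hyphen, or top-level file produces a
# non-zero exit status and fails the check.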
| 333 | 1 |
from collections.abc import Callable
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
__UpperCAmelCase = a
__UpperCAmelCase = b
    if function(SCREAMING_SNAKE_CASE ) == 0: # one of a or b is already a root of the function
return a
elif function(SCREAMING_SNAKE_CASE ) == 0:
return b
elif (
function(SCREAMING_SNAKE_CASE ) * function(SCREAMING_SNAKE_CASE ) > 0
    ): # if neither endpoint is a root and f(a) and f(b) share the same sign,
        # then bisection cannot guarantee a root inside the interval
raise ValueError('''could not find root in given interval.''' )
else:
__UpperCAmelCase = start + (end - start) / 2.0
        while abs(start - mid ) > 1_0**-7: # until the bracketing interval is narrower than 10^-7
if function(SCREAMING_SNAKE_CASE ) == 0:
return mid
elif function(SCREAMING_SNAKE_CASE ) * function(SCREAMING_SNAKE_CASE ) < 0:
__UpperCAmelCase = mid
else:
__UpperCAmelCase = mid
__UpperCAmelCase = start + (end - start) / 2.0
return mid
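# Example (hedged): f(x) = x**3 - 2*x - 5 changes sign on [1, 1000], so the
# loop above halves the bracket until it is narrower than 1e-7 and converges
# to the real root near 2.0945515.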
def __a ( SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 333 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )]
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(SCREAMING_SNAKE_CASE ) <= key:
return input_string
for position, character in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [''''''.join(SCREAMING_SNAKE_CASE ) for row in temp_grid]
__UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE )
return output_string
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = []
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )] # generates template
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
__UpperCAmelCase = 0
for row in temp_grid: # fills in the characters
__UpperCAmelCase = input_string[counter : counter + len(SCREAMING_SNAKE_CASE )]
grid.append(list(SCREAMING_SNAKE_CASE ) )
counter += len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = '''''' # reads as zigzag
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __a ( SCREAMING_SNAKE_CASE ) -> dict[int, str]:
'''simple docstring'''
__UpperCAmelCase = {}
for key_guess in range(1 , len(SCREAMING_SNAKE_CASE ) ): # tries every key
__UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return results
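# Example (hedged) for the 3-rail zigzag implemented above:
#   'WEAREDISCOVEREDFLEEATONCE' encrypts to 'WECRLTEERDSOEEFEAOCAIVDEN',
#   and decrypting that ciphertext with key 3 recovers the original string.
# The brute-force helper above simply tries every key from 1 to
# len(ciphertext) - 1 and returns the candidate plaintexts keyed by the guess.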
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=3 , lowercase__=32 , lowercase__=3 , lowercase__=10 , lowercase__=[8, 16, 32, 64] , lowercase__=[1, 1, 2, 1] , lowercase__=True , lowercase__=True , lowercase__="relu" , lowercase__=3 , lowercase__=None , lowercase__=["stage2", "stage3", "stage4"] , lowercase__=[2, 3, 4] , lowercase__=1 , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embeddings_size
__UpperCAmelCase = hidden_sizes
__UpperCAmelCase = depths
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_act
__UpperCAmelCase = num_labels
__UpperCAmelCase = scope
__UpperCAmelCase = len(lowercase__ )
__UpperCAmelCase = out_features
__UpperCAmelCase = out_indices
__UpperCAmelCase = num_groups
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> List[str]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Dict:
__UpperCAmelCase = BitModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> Any:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = BitForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = BitBackbone(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase = None
__UpperCAmelCase = BitBackbone(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a__ = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = BitModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase_ (self ) -> Optional[Any]:
return
@unittest.skip(reason='''Bit does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> Any:
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
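            # a freshly initialized model should have every batch/group normalization
            # layer set to the identity transform: weight == 1 and bias == 0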
for name, module in model.named_modules():
if isinstance(lowercase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowercase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__UpperCAmelCase = layer_type
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def lowerCAmelCase_ (self ) -> Any:
pass
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = BitModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def __a ( ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> Optional[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
@require_torch
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = (BitBackbone,) if is_torch_available() else ()
a__ = BitConfig
a__ = False
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = BitModelTester(self )
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = StableUnCLIPPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ = False
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = [2, 1, 2, -1]
__UpperCAmelCase = [1, 2, 3, 4]
def lowerCAmelCase_ (self ) -> list[float]:
__UpperCAmelCase = len(self.first_signal )
__UpperCAmelCase = len(self.second_signal )
__UpperCAmelCase = max(lowercase__ , lowercase__ )
# create a zero matrix of max_length x max_length
__UpperCAmelCase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
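        # build a circulant matrix: row i is the second signal rotated by i positions,
        # so multiplying it with the first signal computes the circular convolution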
for i in range(lowercase__ ):
__UpperCAmelCase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__UpperCAmelCase = np.matmul(np.transpose(lowercase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 333 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
A_ : str = {'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["input_ids", "attention_mask"]
a__ = None
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Dict:
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
__UpperCAmelCase = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
__UpperCAmelCase = add_prefix_space
__UpperCAmelCase = pre_tok_class(**lowercase__ )
__UpperCAmelCase = add_prefix_space
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> List[int]:
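        # flatten the conversation into a single sequence: encode each turn and append
        # the EOS token, then keep only the most recent `model_max_length` tokens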
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 333 | 1 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
set_seed(770)
A_ : List[Any] = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
A_ : List[str] = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
A_ : Tuple = os.path.dirname(os.path.abspath(__file__))
A_ : List[str] = os.path.join(os.path.expanduser('~'), '.cache')
A_ : Dict = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = model_type
if use_small:
key += "_small"
return os.path.join(SCREAMING_SNAKE_CASE , REMOTE_MODEL_PATHS[key]['''file_name'''] )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
hf_hub_download(repo_id=SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , local_dir=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ) -> List[Any]:
'''simple docstring'''
if model_type == "text":
__UpperCAmelCase = BarkSemanticModel
__UpperCAmelCase = BarkSemanticConfig
__UpperCAmelCase = BarkSemanticGenerationConfig
elif model_type == "coarse":
__UpperCAmelCase = BarkCoarseModel
__UpperCAmelCase = BarkCoarseConfig
__UpperCAmelCase = BarkCoarseGenerationConfig
elif model_type == "fine":
__UpperCAmelCase = BarkFineModel
__UpperCAmelCase = BarkFineConfig
__UpperCAmelCase = BarkFineGenerationConfig
else:
raise NotImplementedError()
__UpperCAmelCase = f'''{model_type}_small''' if use_small else model_type
__UpperCAmelCase = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(SCREAMING_SNAKE_CASE ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info['''repo_id'''] , model_info['''file_name'''] )
__UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
# this is a hack
__UpperCAmelCase = checkpoint['''model_args''']
if "input_vocab_size" not in model_args:
__UpperCAmelCase = model_args['''vocab_size''']
__UpperCAmelCase = model_args['''vocab_size''']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__UpperCAmelCase = model_args.pop('''n_head''' )
__UpperCAmelCase = model_args.pop('''n_embd''' )
__UpperCAmelCase = model_args.pop('''n_layer''' )
__UpperCAmelCase = ConfigClass(**checkpoint['''model_args'''] )
__UpperCAmelCase = ModelClass(config=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = GenerationConfigClass()
__UpperCAmelCase = model_generation_config
__UpperCAmelCase = checkpoint['''model''']
# fixup checkpoint
__UpperCAmelCase = '''_orig_mod.'''
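    # checkpoints saved from a torch.compile()-wrapped model prefix every key with
    # "_orig_mod."; strip it so the remaining names can be mapped onto the HF layout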
for k, v in list(state_dict.items() ):
if k.startswith(SCREAMING_SNAKE_CASE ):
# replace part of the key with corresponding layer name in HF implementation
__UpperCAmelCase = k[len(SCREAMING_SNAKE_CASE ) :]
for old_layer_name in new_layer_name_dict:
__UpperCAmelCase = new_k.replace(SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] )
__UpperCAmelCase = state_dict.pop(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = set(state_dict.keys() ) - set(model.state_dict().keys() )
__UpperCAmelCase = {k for k in extra_keys if not k.endswith('''.attn.bias''' )}
__UpperCAmelCase = set(model.state_dict().keys() ) - set(state_dict.keys() )
__UpperCAmelCase = {k for k in missing_keys if not k.endswith('''.attn.bias''' )}
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(SCREAMING_SNAKE_CASE ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = checkpoint['''best_val_loss'''].item()
logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE , 3 )} loss''' )
model.eval()
model.to(SCREAMING_SNAKE_CASE )
del checkpoint, state_dict
return model
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="text" ) -> Optional[Any]:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__UpperCAmelCase = '''cpu''' # do conversion on cpu
__UpperCAmelCase = _get_ckpt_path(SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = _load_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
    # load the original Bark model for comparison
__UpperCAmelCase = _bark_load_model(SCREAMING_SNAKE_CASE , '''cpu''' , model_type=SCREAMING_SNAKE_CASE , use_small=SCREAMING_SNAKE_CASE )
if model_type == "text":
__UpperCAmelCase = bark_model['''model''']
if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE ) != bark_model.get_num_params():
raise ValueError('''initial and new models don\'t have the same number of parameters''' )
# check if same output as the bark model
__UpperCAmelCase = 5
__UpperCAmelCase = 1_0
if model_type in ["text", "coarse"]:
__UpperCAmelCase = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
__UpperCAmelCase = bark_model(SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE )
# take last logits
__UpperCAmelCase = output_new_model_total.logits[:, [-1], :]
else:
__UpperCAmelCase = 3
__UpperCAmelCase = 8
__UpperCAmelCase = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = bark_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = output_new_model_total.logits
    # any output difference should come solely from differences in the self-attention implementations
if output_new_model.shape != output_old_model.shape:
raise ValueError('''initial and new outputs don\'t have the same shape''' )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError('''initial and new outputs are not equal''' )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
__UpperCAmelCase = BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
__UpperCAmelCase = BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) )
__UpperCAmelCase = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' )
__UpperCAmelCase = BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = EncodecModel.from_pretrained('''facebook/encodec_24khz''' )
__UpperCAmelCase = BarkConfig.from_sub_model_configs(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__UpperCAmelCase = BarkModel(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = semantic
__UpperCAmelCase = coarseAcoustic
__UpperCAmelCase = fineAcoustic
__UpperCAmelCase = codec
__UpperCAmelCase = bark_generation_config
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
bark.save_pretrained(SCREAMING_SNAKE_CASE , repo_id=SCREAMING_SNAKE_CASE , push_to_hub=SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
A_ : Union[str, Any] = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 333 |
import math
import sys
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if number != int(SCREAMING_SNAKE_CASE ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
__UpperCAmelCase = [-1] * (number + 1)
__UpperCAmelCase = 0
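    # dynamic programming on the recurrence answers[i] = 1 + min(answers[i - j*j])
    # over all perfect squares j*j <= i (Lagrange's theorem bounds the result by 4)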
for i in range(1 , number + 1 ):
__UpperCAmelCase = sys.maxsize
__UpperCAmelCase = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
for j in range(1 , root + 1 ):
__UpperCAmelCase = 1 + answers[i - (j**2)]
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
from collections.abc import Sequence
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False ) -> float:
'''simple docstring'''
if not arr:
return 0
__UpperCAmelCase = 0 if allow_empty_subarrays else float('''-inf''' )
__UpperCAmelCase = 0.0
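    # Kadane's algorithm: at each element either extend the running subarray or
    # restart from the current element, tracking the best sum seen so far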
for num in arr:
__UpperCAmelCase = max(0 if allow_empty_subarrays else num , curr_sum + num )
__UpperCAmelCase = max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
A_ : str = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 333 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = b.T
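    # pairwise squared distances via the expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
    # computed with broadcasting instead of an explicit double loop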
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=1 )
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=0 )
__UpperCAmelCase = np.matmul(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = x.reshape(-1 , 3 )
__UpperCAmelCase = squared_euclidean_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return np.argmin(SCREAMING_SNAKE_CASE , axis=1 )
class A_ ( _a ):
'''simple docstring'''
a__ = ["pixel_values"]
def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None:
super().__init__(**lowercase__ )
__UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256}
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_color_quantize
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray:
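        # map pixel values from [0, 255] to [-1, 1]: divide by 127.5, then subtract 1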
__UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ )
__UpperCAmelCase = image - 1
return image
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase = clusters if clusters is not None else self.clusters
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images]
if do_color_quantize:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase = images.shape[0]
__UpperCAmelCase = images.reshape(lowercase__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase = list(lowercase__ )
else:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__UpperCAmelCase = {'''input_ids''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 333 | 1 |
import math
from datetime import datetime, timedelta
def __a ( SCREAMING_SNAKE_CASE ) -> datetime:
'''simple docstring'''
__UpperCAmelCase = year % 1_9
__UpperCAmelCase = year % 4
__UpperCAmelCase = year % 7
__UpperCAmelCase = math.floor(year / 1_0_0 )
__UpperCAmelCase = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
__UpperCAmelCase = leap_day_inhibits / 4
__UpperCAmelCase = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
__UpperCAmelCase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__UpperCAmelCase = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
__UpperCAmelCase = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
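    # Easter is the first Sunday after the Paschal full moon; the two special cases
    # below apply Gauss's corrections, pinning the date to April 19 or April 18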
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE , 4 , 1_8 )
else:
return datetime(SCREAMING_SNAKE_CASE , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
A_ : Dict = 'will be' if year > datetime.now().year else 'was'
print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Optional[int] = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ['PoolFormerFeatureExtractor']
A_ : Dict = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 333 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Any = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
A_ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 333 |
import math
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
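    # real (active) power: P = S * cos(phi), where the power factor is cos(phi)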
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
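    # reactive power: Q = S * sin(phi) = S * sqrt(1 - pf**2)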
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 1 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=_a ):
'''simple docstring'''
a__ = ["transformers", "torch", "note_seq"]
def __init__(self , *lowercase__ , **lowercase__ ) -> int:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowerCAmelCase_ (cls , *lowercase__ , **lowercase__ ) -> int:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def lowerCAmelCase_ (cls , *lowercase__ , **lowercase__ ) -> Any:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 333 |
def __a ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
A_ : Union[str, Any] = generate_large_matrix()
A_ : Union[str, Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __a ( SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
assert all(row == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for row in grid )
assert all(list(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for col in zip(*SCREAMING_SNAKE_CASE ) )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1
    # edge cases: the array is empty, or every value is negative (first element < 0)
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase = (left + right) // 2
__UpperCAmelCase = array[mid]
        # mid is the first negative index when array[mid] is negative and the
        # element just before it is non-negative
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase = mid + 1
else:
__UpperCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(grid[0] )
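    # rows and columns are sorted in decreasing order, so the first negative index
    # can only move left going down; bound each row's binary search by the previous one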
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
for row in grid:
for i, number in enumerate(SCREAMING_SNAKE_CASE ):
if number < 0:
total += len(SCREAMING_SNAKE_CASE ) - i
break
return total
def __a ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase = timeit(f'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
class A_ ( _a ):
'''simple docstring'''
a__ = "bert-generation"
def __init__(self , lowercase__=50_358 , lowercase__=1_024 , lowercase__=24 , lowercase__=16 , lowercase__=4_096 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=0.02 , lowercase__=1E-12 , lowercase__=0 , lowercase__=2 , lowercase__=1 , lowercase__="absolute" , lowercase__=True , **lowercase__ , ) -> List[str]:
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 333 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ : List[str] = sys.version_info >= (3, 10)
def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = 42
a__ = 42
a__ = 42
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
class A_ ( _a ):
'''simple docstring'''
a__ = "titi"
a__ = "toto"
class A_ ( _a ):
'''simple docstring'''
a__ = "titi"
a__ = "toto"
a__ = 42
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
@dataclass
class A_ :
'''simple docstring'''
a__ = list_field(default=[] )
a__ = list_field(default=[1, 2, 3] )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
a__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_ :
'''simple docstring'''
a__ = field()
a__ = field()
a__ = field()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field()
a__ = None
a__ = field(default="toto" , metadata={"help": "help message"} )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self , lowercase__ , lowercase__ ) -> Optional[int]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
__UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''}
__UpperCAmelCase = {k: v for k, v in vars(lowercase__ ).items() if k != '''container'''}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('''choices''' , lowercase__ ) and yy.get('''choices''' , lowercase__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['''type'''](lowercase__ ) , yy['''type'''](lowercase__ ) )
del xx["type"], yy["type"]
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase_ (self ) -> str:
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
            __UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 333 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = ShapEPipeline
a__ = ["prompt"]
a__ = ["prompt"]
a__ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
a__ = False
@property
def lowerCAmelCase_ (self ) -> int:
return 32
@property
def lowerCAmelCase_ (self ) -> List[Any]:
return 32
@property
def lowerCAmelCase_ (self ) -> Tuple:
return self.time_input_dim * 4
@property
def lowerCAmelCase_ (self ) -> Any:
return 8
@property
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def lowerCAmelCase_ (self ) -> Any:
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(lowercase__ )
@property
def lowerCAmelCase_ (self ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__UpperCAmelCase = PriorTransformer(**lowercase__ )
return model
@property
def lowerCAmelCase_ (self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCAmelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__UpperCAmelCase = ShapERenderer(**lowercase__ )
return model
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.dummy_prior
__UpperCAmelCase = self.dummy_text_encoder
__UpperCAmelCase = self.dummy_tokenizer
__UpperCAmelCase = self.dummy_renderer
__UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=lowercase__ , clip_sample=lowercase__ , clip_sample_range=1.0 , )
__UpperCAmelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> Optional[int]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**lowercase__ )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(lowercase__ ) )
__UpperCAmelCase = output.images[0]
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__UpperCAmelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ (self ) -> Dict:
# NOTE: larger batch sizes cause this test to time out, so only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
__UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase__ , relax_max_difference=lowercase__ , )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**lowercase__ )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = 1
__UpperCAmelCase = 2
__UpperCAmelCase = self.get_dummy_inputs(lowercase__ )
for key in inputs.keys():
if key in self.batch_params:
__UpperCAmelCase = batch_size * [inputs[key]]
__UpperCAmelCase = pipe(**lowercase__ , num_images_per_prompt=lowercase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__UpperCAmelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(0 )
__UpperCAmelCase = pipe(
'''a shark''' , generator=lowercase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
| 333 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = [2, 1, 2, -1]
__UpperCAmelCase = [1, 2, 3, 4]
def lowerCAmelCase_ (self ) -> list[float]:
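        # Build a circulant matrix whose i-th row is the second signal rotated by i,
        # then multiply it by the first signal to obtain the circular convolution.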
__UpperCAmelCase = len(self.first_signal )
__UpperCAmelCase = len(self.second_signal )
__UpperCAmelCase = max(lowercase__ , lowercase__ )
# create a zero matrix of max_length x max_length
__UpperCAmelCase = [[0] * max_length for i in range(lowercase__ )]
        # pad the shorter signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__UpperCAmelCase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
        # multiply the circulant matrix by the first signal
__UpperCAmelCase = np.matmul(np.transpose(lowercase__ ) , np.transpose(self.first_signal ) )
        # round the result to two decimal places
return [round(lowercase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
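    # With the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular
    # convolution evaluates to [10.0, 10.0, 6.0, 14.0].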
| 333 | 1 |
import argparse
A_ : Optional[Any] = 'docs/source/_static/js/custom.js'
def __a ( SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
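    # Pin the stable version constant in custom.js, then append the new release
    # to the version switcher mapping just before its closing brace.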
with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
__UpperCAmelCase = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
__UpperCAmelCase = f'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += f''' "v{version}": "v{version}",\n'''
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
A_ : Dict = parser.parse_args()
update_custom_js(args.version)
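    # Example invocation (the script file name is illustrative):
    #   python update_custom_js.py --version 4.30.0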
| 333 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( _a ):
'''simple docstring'''
a__ = "pegasus"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(self , lowercase__=50_265 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0 , lowercase__=False , lowercase__=0 , lowercase__=1 , lowercase__=1 , **lowercase__ , ) -> str:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , )
@property
def lowerCAmelCase_ (self ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase_ (self ) -> int:
return self.d_model
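    # Via the attribute_map above, config.num_attention_heads and config.hidden_size
    # transparently alias encoder_attention_heads and d_model.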
| 333 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
A_ : Dict = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class A_ ( _a ):
'''simple docstring'''
a__ = "albert"
def __init__(self , lowercase__=30_000 , lowercase__=128 , lowercase__=4_096 , lowercase__=12 , lowercase__=1 , lowercase__=64 , lowercase__=16_384 , lowercase__=1 , lowercase__="gelu_new" , lowercase__=0 , lowercase__=0 , lowercase__=512 , lowercase__=2 , lowercase__=0.02 , lowercase__=1E-12 , lowercase__=0.1 , lowercase__="absolute" , lowercase__=0 , lowercase__=2 , lowercase__=3 , **lowercase__ , ) -> List[Any]:
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_hidden_groups
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = inner_group_num
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = classifier_dropout_prob
__UpperCAmelCase = position_embedding_type
class A_ ( _a ):
'''simple docstring'''
@property
def lowerCAmelCase_ (self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 333 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def lowerCAmelCase_ (self , **lowercase__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
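        # byte_encoder maps the space byte (0x20) to "Ġ", the byte-level BPE marker for a leading space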
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
| 333 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = StableUnCLIPPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ = False
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turtle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
'''simple docstring'''
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
__UpperCAmelCase = {'''num_train_timesteps''': 1_000}
config.update(**lowercase__ )
return config
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
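        # fabricate a short history of past residuals for the multistep scheduler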
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = 10
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
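        # the sampling loop below is executed twice; IPNDM is a multistep method that
        # conditions each step on a history of past residuals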
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
return sample
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps''' ):
__UpperCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.timesteps[5]
__UpperCAmelCase = scheduler.timesteps[6]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase_ (self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase__ , time_step=lowercase__ )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.full_loop()
__UpperCAmelCase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 333 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
A_ : Optional[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
A_ : Dict = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
A_ : Optional[Any] = '|'.join(sys.argv[1:])
A_ : Optional[Any] = re.compile(RF"""^({joined_dirs}).*?\.py$""")
A_ : Any = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 333 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = num_labels
__UpperCAmelCase = image_size
__UpperCAmelCase = layer_depths
__UpperCAmelCase = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
a__ = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = SwiftFormerModelTester(self )
__UpperCAmelCase = ConfigTester(
self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
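            # two hidden states are emitted per stage across the four stages defined by layer_depths, hence 8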
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def __a ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 333 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int = logging.get_logger(__name__)
A_ : Dict = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class A_ ( _a ):
'''simple docstring'''
a__ = "mvp"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(self , lowercase__=50_267 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0.0 , lowercase__=False , lowercase__=True , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__=True , lowercase__=2 , lowercase__=2 , lowercase__=False , lowercase__=100 , lowercase__=800 , **lowercase__ , ) -> List[str]:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = classifier_dropout
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__UpperCAmelCase = use_prompt
__UpperCAmelCase = prompt_length
__UpperCAmelCase = prompt_mid_dim
super().__init__(
pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , lowercase__ ):
__UpperCAmelCase = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
'''The config can simply be saved and uploaded again to be fixed.''' )
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
A_ : Optional[int] = OrderedDict(
[
        # Model for Image classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
A_ : Dict = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
A_ : int = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
A_ : Tuple = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
A_ : int = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
A_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A_ : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
A_ : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
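# A minimal, self-contained sketch (not the transformers _LazyAutoMapping
# implementation) of the pattern the OrderedDicts above feed into: model classes
# are looked up by model-type name and resolved only on first access.
# `SimpleLazyMapping` and the tiny registry below are hypothetical illustrations.
class SimpleLazyMapping:
    def __init__(self, name_mapping):
        self._name_mapping = dict(name_mapping)  # model_type -> class name
        self._cache = {}  # classes resolved so far

    def __getitem__(self, model_type):
        if model_type not in self._cache:
            class_name = self._name_mapping[model_type]
            # A real implementation would import the class from its module here;
            # a placeholder type keeps the sketch runnable without flax installed.
            self._cache[model_type] = type(class_name, (), {})
        return self._cache[model_type]

_demo_mapping = SimpleLazyMapping([("bert", "FlaxBertModel"), ("bart", "FlaxBartModel")])
print(_demo_mapping["bert"].__name__)  # FlaxBertModel, resolved lazily on first access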
| 333 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Tuple = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
A_ : Union[str, Any] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
A_ : int = {'facebook/blenderbot_small-90M': 512}
def __a ( SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = set()
__UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCAmelCase = char
__UpperCAmelCase = set(SCREAMING_SNAKE_CASE )
return pairs
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
def __init__(self , lowercase__ , lowercase__ , lowercase__="__start__" , lowercase__="__end__" , lowercase__="__unk__" , lowercase__="__null__" , **lowercase__ , ) -> int:
super().__init__(unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , **lowercase__ )
with open(lowercase__ , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(lowercase__ )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(lowercase__ , encoding='''utf-8''' ) as merges_handle:
__UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = {}
@property
def lowerCAmelCase_ (self ) -> int:
return len(self.encoder )
def lowerCAmelCase_ (self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ (self , lowercase__ ) -> str:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = re.sub('''([.,!?()])''' , R''' \1''' , lowercase__ )
__UpperCAmelCase = re.sub('''(\')''' , R''' \1 ''' , lowercase__ )
__UpperCAmelCase = re.sub(R'''\s{2,}''' , ''' ''' , lowercase__ )
if "\n" in token:
__UpperCAmelCase = token.replace('''\n''' , ''' __newln__''' )
__UpperCAmelCase = token.split(''' ''' )
__UpperCAmelCase = []
for token in tokens:
if not len(lowercase__ ):
continue
__UpperCAmelCase = token.lower()
__UpperCAmelCase = tuple(lowercase__ )
__UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__UpperCAmelCase = get_pairs(lowercase__ )
if not pairs:
words.append(lowercase__ )
continue
while True:
__UpperCAmelCase = min(lowercase__ , key=lambda lowercase__ : self.bpe_ranks.get(lowercase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(lowercase__ ):
try:
__UpperCAmelCase = word.index(lowercase__ , lowercase__ )
new_word.extend(word[i:j] )
__UpperCAmelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(lowercase__ )
__UpperCAmelCase = new_word
if len(lowercase__ ) == 1:
break
else:
__UpperCAmelCase = get_pairs(lowercase__ )
__UpperCAmelCase = '''@@ '''.join(lowercase__ )
__UpperCAmelCase = word[:-4]
__UpperCAmelCase = word
words.append(lowercase__ )
return " ".join(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> List[str]:
__UpperCAmelCase = []
__UpperCAmelCase = re.findall(R'''\S+\n?''' , lowercase__ )
for token in words:
split_tokens.extend(list(self.bpe(lowercase__ ).split(''' ''' ) ) )
return split_tokens
def lowerCAmelCase_ (self , lowercase__ ) -> int:
__UpperCAmelCase = token.lower()
return self.encoder.get(lowercase__ , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ (self , lowercase__ ) -> str:
return self.decoder.get(lowercase__ , self.unk_token )
def lowerCAmelCase_ (self , lowercase__ ) -> str:
__UpperCAmelCase = ''' '''.join(lowercase__ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
if not os.path.isdir(lowercase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCAmelCase = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__ ) + '''\n''' )
__UpperCAmelCase = 0
with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase__ : lowercase__[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
__UpperCAmelCase = token_index
writer.write(''' '''.join(lowercase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
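# A self-contained sketch of the two core BPE ideas used by the tokenizer above,
# written against a toy word rather than the tokenizer's internal state: extract
# adjacent symbol pairs, then merge the lowest-ranked (earliest-learned) pair.
# The tiny `ranks` table is made up for illustration and is not the
# blenderbot_small merges file.
def demo_get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}

def demo_merge(word, pair):
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
            merged.append(word[i] + word[i + 1])  # fuse the chosen pair
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)

demo_word = ("h", "e", "l", "l", "o</w>")
demo_ranks = {("l", "l"): 0, ("h", "e"): 1}  # lower rank = merged earlier
best_pair = min(demo_get_pairs(demo_word) & set(demo_ranks), key=demo_ranks.get)
print(demo_merge(demo_word, best_pair))  # ('h', 'e', 'll', 'o</w>')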
| 333 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
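# A hedged usage sketch of the warmup/decay pattern above: wiring a linear
# warmup + linear decay lambda into a real torch optimizer and printing the
# learning rate over a few steps. The numbers (warmup=3, total=10, lr=0.1) are
# illustrative only.
import torch
from torch.optim import SGD

_demo_param = torch.nn.Parameter(torch.zeros(1))
_demo_opt = SGD([_demo_param], lr=0.1)

def _demo_lr_lambda(step, warmup=3, total=10):
    if step < warmup:
        return step / max(1, warmup)  # linear ramp-up
    return max(0.0, (total - step) / max(1, total - warmup))  # linear decay

_demo_sched = LambdaLR(_demo_opt, _demo_lr_lambda)
for _ in range(10):
    _demo_opt.step()
    _demo_sched.step()
    print(round(_demo_opt.param_groups[0]["lr"], 4))  # ramps up to 0.1, then decays to 0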
| 333 | 1 |
def __a ( SCREAMING_SNAKE_CASE ) -> bool:
'''simple docstring'''
    __UpperCAmelCase = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
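# Without rounding, float cube roots misfire: 27 ** (1 / 3) evaluates to
# 3.0000000000000004, so re-cubing and comparing exactly would fail. A
# self-contained version of the rounded check used above:
def perfect_cube_demo(n: int) -> bool:
    root = round(n ** (1 / 3))
    return root**3 == n

assert perfect_cube_demo(27) is True
assert perfect_cube_demo(28) is False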
| 333 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list:
'''simple docstring'''
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [[0] * n for i in range(SCREAMING_SNAKE_CASE )]
for i in range(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = y_points[i]
for i in range(2 , SCREAMING_SNAKE_CASE ):
for j in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
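# A self-contained sketch of Neville's iterated interpolation, equivalent in
# spirit to the routine above, evaluated on a toy quadratic so the expected
# answer is known exactly. Names here are descriptive stand-ins.
def neville_demo(x_points, y_points, x):
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for col in range(1, n):
        for row in range(col, n):
            q[row][col] = (
                (x - x_points[row - col]) * q[row][col - 1]
                - (x - x_points[row]) * q[row - 1][col - 1]
            ) / (x_points[row] - x_points[row - col])
    return q[n - 1][n - 1]

_xs, _ys = [1.0, 2.0, 3.0], [1.0, 4.0, 9.0]  # samples of y = x**2
print(neville_demo(_xs, _ys, 2.5))  # 6.25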
| 333 | 1 |
import math
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
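# A hedged, self-contained restatement of the power-triangle math above with
# descriptive names (the obfuscation binds both functions to the same name):
# real power P = S * pf, reactive power Q = S * sqrt(1 - pf**2).
def real_power_demo(apparent_power: float, power_factor: float) -> float:
    return apparent_power * power_factor

def reactive_power_demo(apparent_power: float, power_factor: float) -> float:
    return apparent_power * math.sqrt(1 - power_factor**2)

print(real_power_demo(100, 0.8))      # 80.0 W
print(reactive_power_demo(100, 0.8))  # ~60.0 VAR -- the classic 3-4-5 power triangle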
| 333 |
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
# edges = list of graph's edges
__UpperCAmelCase = get_edges(SCREAMING_SNAKE_CASE )
# While there are still elements in edges list, take an arbitrary edge
# (from_node, to_node) and add his extremity to chosen_vertices and then
# remove all arcs adjacent to the from_node and to_node
while edges:
__UpperCAmelCase , __UpperCAmelCase = edges.pop()
chosen_vertices.add(SCREAMING_SNAKE_CASE )
chosen_vertices.add(SCREAMING_SNAKE_CASE )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(SCREAMING_SNAKE_CASE )
return chosen_vertices
def __a ( SCREAMING_SNAKE_CASE ) -> set:
'''simple docstring'''
__UpperCAmelCase = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A_ :
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=2 , lowercase__=99 , lowercase__=0 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=2 , lowercase__=0.02 , lowercase__=2 , lowercase__=4 , lowercase__="last" , lowercase__=True , lowercase__=None , lowercase__=0 , ) -> Union[str, Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_lengths
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = gelu_activation
__UpperCAmelCase = sinusoidal_embeddings
__UpperCAmelCase = causal
__UpperCAmelCase = asm
__UpperCAmelCase = n_langs
__UpperCAmelCase = vocab_size
__UpperCAmelCase = n_special
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = summary_type
__UpperCAmelCase = use_proj
__UpperCAmelCase = scope
__UpperCAmelCase = bos_token_id
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_input_lengths:
__UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase_ (self ) -> Tuple:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Union[str, Any]:
__UpperCAmelCase = XLMModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , lengths=lowercase__ , langs=lowercase__ )
__UpperCAmelCase = model(lowercase__ , langs=lowercase__ )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Tuple:
__UpperCAmelCase = XLMWithLMHeadModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Any:
__UpperCAmelCase = XLMForQuestionAnsweringSimple(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
__UpperCAmelCase = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ )
__UpperCAmelCase = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> str:
__UpperCAmelCase = XLMForQuestionAnswering(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
__UpperCAmelCase = model(
lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , p_mask=lowercase__ , )
__UpperCAmelCase = model(
lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ , cls_index=lowercase__ , is_impossible=lowercase__ , )
((__UpperCAmelCase) , ) = result_with_labels.to_tuple()
__UpperCAmelCase = model(lowercase__ , start_positions=lowercase__ , end_positions=lowercase__ )
((__UpperCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> str:
__UpperCAmelCase = XLMForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = XLMForTokenClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> List[Any]:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = XLMForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = self.prepare_config_and_inputs()
        (
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
            __UpperCAmelCase,
        ) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a__ = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__=False ) -> Tuple:
__UpperCAmelCase = super()._prepare_for_class(lowercase__ , lowercase__ , return_labels=lowercase__ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase__ )
return inputs_dict
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = XLMModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=lowercase__ , emb_dim=37 )
def lowerCAmelCase_ (self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowercase__ )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowercase__ )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowercase__ )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=False , lowercase__=1 ) -> Dict:
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertListEqual(
[isinstance(lowercase__ , lowercase__ ) for iter_attentions in attentions] , [True] * len(lowercase__ ) )
self.assertEqual(len(lowercase__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowercase__ ):
# adds PAD dummy token
__UpperCAmelCase = min_length + idx + 1
__UpperCAmelCase = min_length + idx + 1
__UpperCAmelCase = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowercase__ ) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=False , lowercase__=1 ) -> str:
self.assertIsInstance(lowercase__ , lowercase__ )
self.assertListEqual(
[isinstance(lowercase__ , lowercase__ ) for iter_hidden_states in hidden_states] , [True] * len(lowercase__ ) , )
self.assertEqual(len(lowercase__ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowercase__ ):
# adds PAD dummy token
__UpperCAmelCase = min_length + idx + 1
__UpperCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowercase__ ) , )
pass
@slow
def lowerCAmelCase_ (self ) -> Dict:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = XLMModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(lowercase__ )
__UpperCAmelCase = torch.tensor([[14, 447]] , dtype=torch.long , device=lowercase__ ) # the president
__UpperCAmelCase = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__UpperCAmelCase = model.generate(lowercase__ , do_sample=lowercase__ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowercase__ )
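# A toy sketch of the tester pattern used throughout this file: a helper class
# builds config/inputs, and each create_and_check_* method runs a forward pass
# (faked here) and asserts output shapes via the parent test case. Purely
# illustrative; no real model is involved.
class ToyModelTester:
    def __init__(self, parent, batch_size=2, seq_length=3, hidden_size=4):
        self.parent = parent
        self.batch_size, self.seq_length, self.hidden_size = batch_size, seq_length, hidden_size

    def prepare_config_and_inputs(self):
        return [[0] * self.seq_length for _ in range(self.batch_size)]

    def create_and_check_model(self, input_ids):
        shape = (len(input_ids), len(input_ids[0]), self.hidden_size)  # stand-in forward pass
        self.parent.assertEqual(shape, (self.batch_size, self.seq_length, self.hidden_size))

class ToyModelTest(unittest.TestCase):
    def test_model(self):
        tester = ToyModelTester(self)
        tester.create_and_check_model(tester.prepare_config_and_inputs())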
| 333 |
A_ : List[Any] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
A_ : int = ['a', 'b', 'c', 'd', 'e']
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = start
# add current to visited
visited.append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# if all neighbors visited add current to sort
sort.append(SCREAMING_SNAKE_CASE )
# if all vertices haven't been visited select a new one to visit
if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
for vertice in vertices:
if vertice not in visited:
__UpperCAmelCase = topological_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# return sort
return sort
if __name__ == "__main__":
A_ : Tuple = topological_sort('a', [], [])
print(sort)
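# A self-contained DFS topological sort over the same toy graph, using explicit
# recursion instead of the shared-list threading above. Nodes are appended in
# postorder, so every descendant precedes its ancestor; reversing gives the
# usual "sources first" ordering.
def topo_sort_demo(vertices, edges):
    visited, order = set(), []

    def dfs(node):
        visited.add(node)
        for nbr in edges[node]:
            if nbr not in visited:
                dfs(nbr)
        order.append(node)  # postorder: appended after all descendants

    for v in vertices:
        if v not in visited:
            dfs(v)
    return order

print(topo_sort_demo(["a", "b", "c", "d", "e"], {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}))
# ['c', 'd', 'e', 'b', 'a']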
| 333 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Any = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class A_ ( _a ):
'''simple docstring'''
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__(self , lowercase__=50_277 , lowercase__=1_024 , lowercase__=4_096 , lowercase__=32 , lowercase__=None , lowercase__=None , lowercase__=1E-5 , lowercase__=0 , lowercase__=0 , lowercase__=6 , lowercase__=False , lowercase__=True , **lowercase__ , ) -> Tuple:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = context_length
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
__UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
__UpperCAmelCase = layer_norm_epsilon
__UpperCAmelCase = rescale_every
__UpperCAmelCase = use_cache
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
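# A tiny self-contained illustration of the derived-default pattern used in the
# config above: optional sizes fall back to values computed from hidden_size
# when left as None. `ToyRwkvLikeConfig` is a hypothetical stand-in, not the
# transformers RwkvConfig.
class ToyRwkvLikeConfig:
    def __init__(self, hidden_size=1024, attention_hidden_size=None, intermediate_size=None):
        self.hidden_size = hidden_size
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size

_cfg = ToyRwkvLikeConfig(hidden_size=512)
print(_cfg.attention_hidden_size, _cfg.intermediate_size)  # 512 2048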
| 333 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
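# A minimal sketch (not the transformers _LazyModule) of the idea above:
# defer a heavy import until the symbol is first accessed, then cache it so
# __getattr__ only fires once.
import importlib

class LazyNamespace:
    def __init__(self, import_structure):
        self._import_structure = import_structure  # symbol -> module name

    def __getattr__(self, name):
        module_name = self._import_structure.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache on the instance
        return value

_ns = LazyNamespace({"sqrt": "math"})
print(_ns.sqrt(9.0))  # math is imported only when sqrt is first touched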
| 333 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
A_ : Optional[int] = random.Random()
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> int:
'''simple docstring'''
if rng is None:
__UpperCAmelCase = global_rng
__UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=7 , lowercase__=400 , lowercase__=2_000 , lowercase__=1 , lowercase__=0.0 , lowercase__=16_000 , lowercase__=True , lowercase__=True , ) -> Optional[int]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = min_seq_length
__UpperCAmelCase = max_seq_length
__UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase = feature_size
__UpperCAmelCase = padding_value
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = return_attention_mask
__UpperCAmelCase = do_normalize
def lowerCAmelCase_ (self ) -> int:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase_ (self , lowercase__=False , lowercase__=False ) -> List[str]:
def _flatten(lowercase__ ):
return list(itertools.chain(*lowercase__ ) )
if equal_length:
__UpperCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__UpperCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCAmelCase = [np.asarray(lowercase__ ) for x in speech_inputs]
return speech_inputs
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = WavaVecaFeatureExtractor
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = WavaVecaFeatureExtractionTester(self )
def lowerCAmelCase_ (self , lowercase__ ) -> Optional[Any]:
self.assertTrue(np.all(np.mean(lowercase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase__ , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase_ (self ) -> int:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = [np.asarray(lowercase__ ) for speech_input in speech_inputs]
# Test not batched input
__UpperCAmelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# Test batched
__UpperCAmelCase = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ):
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCAmelCase = np.asarray(lowercase__ )
__UpperCAmelCase = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
__UpperCAmelCase = feat_extract(lowercase__ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowercase__ , lowercase__ ):
self.assertTrue(np.allclose(lowercase__ , lowercase__ , atol=1E-3 ) )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase = [None, 1_600, None]
for max_length, padding in zip(lowercase__ , lowercase__ ):
__UpperCAmelCase = feat_extract(lowercase__ , padding=lowercase__ , max_length=lowercase__ , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = range(800 , 1_400 , 200 )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in lengths]
__UpperCAmelCase = ['''longest''', '''max_length''', '''do_not_pad''']
__UpperCAmelCase = [None, 1_600, None]
for max_length, padding in zip(lowercase__ , lowercase__ ):
__UpperCAmelCase = feat_extract(lowercase__ , max_length=lowercase__ , padding=lowercase__ )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=1_000 , padding='''max_length''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=1_000 , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = feat_extract(
lowercase__ , truncation=lowercase__ , max_length=2_000 , padding='''longest''' , return_tensors='''np''' )
__UpperCAmelCase = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def lowerCAmelCase_ (self ) -> Optional[int]:
import torch
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = np.random.rand(100 ).astype(np.floataa )
__UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__UpperCAmelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def lowerCAmelCase_ (self ) -> Optional[Any]:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
__UpperCAmelCase = WavaVecaConfig.from_pretrained(lowercase__ )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(lowercase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
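# A quick self-contained check of the zero-mean / unit-variance property the
# tests above assert: normalize a random signal the way the feature extractor's
# do_normalize path does (mean-subtract, divide by sqrt(var + 1e-7)), then
# verify mean ~ 0 and variance ~ 1.
import numpy as np

_signal = np.random.rand(800).astype(np.float32)
_normed = (_signal - _signal.mean()) / np.sqrt(_signal.var() + 1e-7)
assert abs(_normed.mean()) < 1e-3
assert abs(_normed.var() - 1) < 1e-3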
| 333 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
'''simple docstring'''
model.train()
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
set_seed(4_2 )
__UpperCAmelCase = RegressionModel()
__UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
__UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase = RegressionDataset(length=9_6 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if iteration < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if batch_num < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __a ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 333 | 1 |
import os
from distutils.util import strtobool
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
for e in env_keys:
__UpperCAmelCase = int(os.environ.get(SCREAMING_SNAKE_CASE , -1 ) )
if val >= 0:
return val
return default
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = os.environ.get(SCREAMING_SNAKE_CASE , str(SCREAMING_SNAKE_CASE ) )
    return strtobool(SCREAMING_SNAKE_CASE ) == 1 # Despite its name, `strtobool` actually returns an int...
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="no" ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = os.environ.get(SCREAMING_SNAKE_CASE , str(SCREAMING_SNAKE_CASE ) )
return value
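# Illustrative usage of the helpers above: a minimal, self-contained sketch.
# The original (pre-obfuscation) names are assumed to be parse_flag_from_env and
# parse_choice_from_env; the env-var names below are hypothetical.
import os
from distutils.util import strtobool

def _parse_flag_from_env(key, default=False):
    return strtobool(os.environ.get(key, str(default))) == 1

os.environ['DEMO_USE_FP16'] = 'yes'  # strtobool accepts y/yes/t/true/on/1 ...
assert _parse_flag_from_env('DEMO_USE_FP16') is True
assert _parse_flag_from_env('DEMO_MISSING', default=False) is False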
| 333 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
A_ : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
A_ : Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
A_ : Union[str, Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 333 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = None
a__ = BloomTokenizerFast
a__ = BloomTokenizerFast
a__ = True
a__ = False
a__ = "tokenizer_file"
a__ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def lowerCAmelCase_ (self ) -> List[str]:
super().setUp()
__UpperCAmelCase = BloomTokenizerFast.from_pretrained('''bigscience/tokenizer''' )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ (self , **lowercase__ ) -> str:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = ['''The quick brown fox</s>''', '''jumps over the lazy dog</s>''']
__UpperCAmelCase = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
__UpperCAmelCase = tokenizer.batch_encode_plus(lowercase__ )['''input_ids''']
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.batch_decode(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self , lowercase__=6 ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__UpperCAmelCase = '''This is a simple input'''
__UpperCAmelCase = ['''This is a simple input 1''', '''This is a simple input 2''']
__UpperCAmelCase = ('''This is a simple input''', '''This is a pair''')
__UpperCAmelCase = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(lowercase__ , max_length=lowercase__ )
tokenizer_r.encode_plus(lowercase__ , max_length=lowercase__ )
tokenizer_r.batch_encode_plus(lowercase__ , max_length=lowercase__ )
tokenizer_r.encode(lowercase__ , max_length=lowercase__ )
tokenizer_r.batch_encode_plus(lowercase__ , max_length=lowercase__ )
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''' )
__UpperCAmelCase = None # Hotfixing padding = None
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='''max_length''' )
# Simple input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' )
# Simple input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='''max_length''' )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' )
# Pair input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' , )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = self.get_rust_tokenizer()
__UpperCAmelCase = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=lowercase__ )
__UpperCAmelCase = next(iter(lowercase__ ) )['''premise'''] # pick up one data
__UpperCAmelCase = list(sample_data.values() )
__UpperCAmelCase = list(map(tokenizer.encode , lowercase__ ) )
__UpperCAmelCase = [tokenizer.decode(lowercase__ , clean_up_tokenization_spaces=lowercase__ ) for x in output_tokens]
self.assertListEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> str:
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraint. The parent class's version of this test would fail,
        # since it relies on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 333 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )]
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(SCREAMING_SNAKE_CASE ) <= key:
return input_string
for position, character in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [''''''.join(SCREAMING_SNAKE_CASE ) for row in temp_grid]
__UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE )
return output_string
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = []
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )] # generates template
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
__UpperCAmelCase = 0
for row in temp_grid: # fills in the characters
__UpperCAmelCase = input_string[counter : counter + len(SCREAMING_SNAKE_CASE )]
grid.append(list(SCREAMING_SNAKE_CASE ) )
counter += len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = '''''' # reads as zigzag
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __a ( SCREAMING_SNAKE_CASE ) -> dict[int, str]:
'''simple docstring'''
__UpperCAmelCase = {}
for key_guess in range(1 , len(SCREAMING_SNAKE_CASE ) ): # tries every key
__UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
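# A short worked example of the zigzag (rail fence) cipher implemented above,
# assuming the obfuscated helpers correspond to encrypt / decrypt / bruteforce
# as suggested by their docstrings. With key=3, "HELLOWORLD" zigzags over
# 3 rails (row pattern 0,1,2,1,0,1,2,1,0,1) and is read off row by row:
#   rail 0: HOL   rail 1: ELWRD   rail 2: LO   ->  "HOLELWRDLO"
_rails = [[], [], []]
_row, _step = 0, 1
for _ch in 'HELLOWORLD':
    _rails[_row].append(_ch)
    _step = 1 if _row == 0 else (-1 if _row == 2 else _step)
    _row += _step
assert ''.join(''.join(r) for r in _rails) == 'HOLELWRDLO'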
| 333 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
def __a ( SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if num <= 0:
raise ValueError('''math domain error''' )
return quad(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , args=(SCREAMING_SNAKE_CASE) )[0]
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
return math.pow(SCREAMING_SNAKE_CASE , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
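# A minimal numerical check of the gamma-integral idea above
# (Gamma(z) = integral_0^inf x**(z-1) * exp(-x) dx), written independently of
# the obfuscated names: Gamma(5) should equal 4! = 24.
import math
from numpy import inf
from scipy.integrate import quad

def _integrand(x, z):
    return math.pow(x, z - 1) * math.exp(-x)

_value = quad(_integrand, 0, inf, args=(5,))[0]
assert abs(_value - 24.0) < 1e-6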
| 333 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = StableUnCLIPPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ = False
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
        __UpperCAmelCase = pipe('''anime turtle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333 | 1 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
while second != 0:
__UpperCAmelCase = first & second
first ^= second
__UpperCAmelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input('Enter the first number: ').strip())
A_ : int = int(input('Enter the second number: ').strip())
print(F"""{add(first, second) = }""")
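# Step-by-step trace of the carry-propagation loop above for 13 + 5. The
# variable names here are descriptive stand-ins for the obfuscated ones:
#   first=0b01101, second=0b00101 -> carry=0b00101, first=0b01000, second=0b01010
#   first=0b01000, second=0b01010 -> carry=0b01000, first=0b00010, second=0b10000
#   first=0b00010, second=0b10000 -> carry=0,       first=0b10010, second=0
# leaving first == 18. (Works for non-negative ints; Python's unbounded ints
# would loop forever on negatives.)
_first, _second = 13, 5
while _second != 0:
    _carry = _first & _second
    _first ^= _second
    _second = _carry << 1
assert _first == 18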
| 333 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
A_ : str = {'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["input_ids", "attention_mask"]
a__ = None
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Dict:
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
__UpperCAmelCase = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
__UpperCAmelCase = add_prefix_space
__UpperCAmelCase = pre_tok_class(**lowercase__ )
__UpperCAmelCase = add_prefix_space
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> List[int]:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
| 333 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Dict = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class A_ ( _a ):
'''simple docstring'''
a__ = "trajectory_transformer"
a__ = ["past_key_values"]
a__ = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , lowercase__=100 , lowercase__=5 , lowercase__=1 , lowercase__=1 , lowercase__=249 , lowercase__=6 , lowercase__=17 , lowercase__=25 , lowercase__=4 , lowercase__=4 , lowercase__=128 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0006 , lowercase__=512 , lowercase__=0.02 , lowercase__=1E-12 , lowercase__=1 , lowercase__=True , lowercase__=1 , lowercase__=50_256 , lowercase__=50_256 , **lowercase__ , ) -> Union[str, Any]:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = action_weight
__UpperCAmelCase = reward_weight
__UpperCAmelCase = value_weight
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = block_size
__UpperCAmelCase = action_dim
__UpperCAmelCase = observation_dim
__UpperCAmelCase = transition_dim
__UpperCAmelCase = learning_rate
__UpperCAmelCase = n_layer
__UpperCAmelCase = n_head
__UpperCAmelCase = n_embd
__UpperCAmelCase = embd_pdrop
__UpperCAmelCase = attn_pdrop
__UpperCAmelCase = resid_pdrop
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = kaiming_initializer_range
__UpperCAmelCase = use_cache
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
| 333 |
import math
import sys
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if number != int(SCREAMING_SNAKE_CASE ):
raise ValueError('''the value of input must be a natural number''' )
if number < 0:
raise ValueError('''the value of input must not be a negative number''' )
if number == 0:
return 1
__UpperCAmelCase = [-1] * (number + 1)
__UpperCAmelCase = 0
for i in range(1 , number + 1 ):
__UpperCAmelCase = sys.maxsize
__UpperCAmelCase = int(math.sqrt(SCREAMING_SNAKE_CASE ) )
for j in range(1 , root + 1 ):
__UpperCAmelCase = 1 + answers[i - (j**2)]
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
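# A small self-contained re-statement of the dynamic programme above
# (Lagrange's four-square theorem), with descriptive names for illustration:
# answers[i] = 1 + min(answers[i - j*j]) over all squares j*j <= i.
import math

def _least_square_terms(number: int) -> int:
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        answers[i] = 1 + min(
            answers[i - j * j] for j in range(1, int(math.sqrt(i)) + 1)
        )
    return answers[number]

assert _least_square_terms(12) == 3  # 4 + 4 + 4
assert _least_square_terms(13) == 2  # 9 + 4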
| 333 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase__ = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCAmelCase__ = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase__ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase__ = sorted(arg_to_scheduler.keys())
UpperCAmelCase__ = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowercase_ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : str , __UpperCAmelCase : argparse.Namespace , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]="base" , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[int] , ) ->Tuple:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
a = 0
a = Path(self.hparams.output_dir )
a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
a = config
a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
a = tokenizer
a = MODEL_MODES[mode]
if model is None:
a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
a = model
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : str ) ->str:
"""simple docstring"""
a = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = arg_to_scheduler[self.hparams.lr_scheduler]
a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.model
a = ['''bias''', '''LayerNorm.weight''']
a = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
a = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
a = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a = optimizer
a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) ->Dict:
"""simple docstring"""
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] ) ->Any:
"""simple docstring"""
return self.validation_end(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Dict ) ->Optional[int]:
"""simple docstring"""
if stage == "test":
a = len(self.test_dataloader().dataset )
else:
a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
a = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : bool = False ) ->str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return self.train_loader
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict[str, Any] ) ->None:
"""simple docstring"""
a = self.output_dir.joinpath('''best_tfmr''' )
a = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : Dict , __UpperCAmelCase : int ) ->int:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=__UpperCAmelCase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(__UpperCAmelCase ).parent / '''test_run''' / '''cache''' ) , type=__UpperCAmelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=__UpperCAmelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=__UpperCAmelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=__UpperCAmelCase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=__UpperCAmelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=__UpperCAmelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=__UpperCAmelCase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__UpperCAmelCase )
parser.add_argument('''--train_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) ->int:
"""simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) ->int:
"""simple docstring"""
a = trainer.lr_schedulers[0]['''scheduler''']
a = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Union[str, Any]:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
a = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Optional[int]:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
a = trainer.callback_metrics
# Log and save results to file
a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(__UpperCAmelCase , '''w''' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def _a ( a :Union[str, Any] , a :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=a )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=a , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=a , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a ( a :BaseTransformer , a :argparse.Namespace , a :Tuple=None , a :Any=True , a :List[str]=[] , a :List[Any]=None , a :Union[str, Any]=None , **a :Optional[Any] , ) -> List[str]:
pl.seed_everything(args.seed )
# init model
a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a )
# add custom checkpoints
if checkpoint_callback is None:
a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a )
if logging_callback is None:
a = LoggingCallback()
a = {}
if args.fpaa:
a = 16
if args.gpus > 1:
a = '''auto'''
a = '''ddp'''
a = args.accumulate_grad_batches
a = None
a = '''auto'''
a = pl.Trainer.from_argparse_args(
a , weights_summary=a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a , val_check_interval=1 , num_sanity_val_steps=2 , **a , )
if args.do_train:
trainer.fit(a )
else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
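# Minimal sketch of the weight-decay grouping done in configure_optimizers
# above: parameters whose names contain "bias" or "LayerNorm.weight" are
# excluded from weight decay. The tiny module below is hypothetical, for
# illustration only.
import torch.nn as nn

class _TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

_model = _TinyModel()
_no_decay = ['bias', 'LayerNorm.weight']
_decayed = [n for n, _ in _model.named_parameters() if not any(nd in n for nd in _no_decay)]
_skipped = [n for n, _ in _model.named_parameters() if any(nd in n for nd in _no_decay)]
assert _decayed == ['dense.weight']
assert sorted(_skipped) == ['LayerNorm.bias', 'LayerNorm.weight', 'dense.bias']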
| 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase = b.T
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=1 )
__UpperCAmelCase = np.sum(np.square(SCREAMING_SNAKE_CASE ) , axis=0 )
__UpperCAmelCase = np.matmul(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCAmelCase = aa[:, None] - 2 * ab + ba[None, :]
return d
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = x.reshape(-1 , 3 )
__UpperCAmelCase = squared_euclidean_distance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return np.argmin(SCREAMING_SNAKE_CASE , axis=1 )
class A_ ( _a ):
'''simple docstring'''
a__ = ["pixel_values"]
def __init__(self , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = True , **lowercase__ , ) -> None:
super().__init__(**lowercase__ )
__UpperCAmelCase = size if size is not None else {'''height''': 256, '''width''': 256}
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = np.array(lowercase__ ) if clusters is not None else None
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_normalize
__UpperCAmelCase = do_color_quantize
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ) -> np.ndarray:
__UpperCAmelCase = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
lowercase__ , size=(size['''height'''], size['''width''']) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , ) -> np.ndarray:
__UpperCAmelCase = rescale(image=lowercase__ , scale=1 / 127.5 , data_format=lowercase__ )
__UpperCAmelCase = image - 1
return image
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> PIL.Image.Image:
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(lowercase__ )
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__UpperCAmelCase = clusters if clusters is not None else self.clusters
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_color_quantize and clusters is None:
raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__ ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=lowercase__ ) for image in images]
if do_color_quantize:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__UpperCAmelCase = np.array(lowercase__ )
__UpperCAmelCase = color_quantize(lowercase__ , lowercase__ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__UpperCAmelCase = images.shape[0]
__UpperCAmelCase = images.reshape(lowercase__ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__UpperCAmelCase = list(lowercase__ )
else:
__UpperCAmelCase = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
__UpperCAmelCase = {'''input_ids''': images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
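# A tiny numeric check of the colour-quantisation helpers above: squared
# Euclidean distances via the ||a||^2 - 2ab + ||b||^2 expansion, then argmin
# cluster ids. Names and data below are illustrative only.
import numpy as np

def _squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    return a2[:, None] - 2 * ab + b2[None, :]

_pixels = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
_clusters = np.array([[0.1, 0.1, 0.1], [0.9, 0.9, 0.9]])
_ids = np.argmin(_squared_euclidean_distance(_pixels, _clusters), axis=1)
assert _ids.tolist() == [0, 1]  # dark pixel -> dark cluster, light -> light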
| 333 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case_ : str ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = len(snake_case_ )
for i in range(n - 1 ):
for j in range(i + 1 , snake_case_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowerCAmelCase_ ( snake_case_ : str ) -> Tuple:
'''simple docstring'''
if len(snake_case_ ) <= 1:
return arr, 0
UpperCAmelCase_ = len(snake_case_ ) // 2
UpperCAmelCase_ = arr[0:mid]
UpperCAmelCase_ = arr[mid:]
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = _count_cross_inversions(snake_case_ , snake_case_ )
UpperCAmelCase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = 0
while i < len(snake_case_ ) and j < len(snake_case_ ):
if p[i] > q[j]:
            # if p[i] > q[j], then p[k] > q[j] for all i <= k < len(p).
            # These are all inversions. The claim emerges from the
            # property that p is sorted.
num_inversion += len(snake_case_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(snake_case_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowerCAmelCase_ ( ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase_ = count_inversions_bf(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , snake_case_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase_ = count_inversions_bf(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , snake_case_ )
# an empty list should also have zero inversions
UpperCAmelCase_ = []
UpperCAmelCase_ = count_inversions_bf(snake_case_ )
UpperCAmelCase_ , UpperCAmelCase_ = count_inversions_recursive(snake_case_ )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , snake_case_ )
if __name__ == "__main__":
main()
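# Quick cross-check of the counting logic above on a fresh array, using a
# self-contained brute-force re-implementation with descriptive names (the
# snippet's own names are obfuscated): [3, 1, 2] has exactly two inversions,
# the pairs (3, 1) and (3, 2).
def _brute_force_inversions(arr):
    return sum(
        1
        for i in range(len(arr) - 1)
        for j in range(i + 1, len(arr))
        if arr[i] > arr[j]
    )

assert _brute_force_inversions([3, 1, 2]) == 2
assert _brute_force_inversions([1, 2, 3]) == 0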
| 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : Optional[int] = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ['PoolFormerFeatureExtractor']
A_ : Dict = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
A_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 333 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : Union[str, Any] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 2 |
import math
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * power_factor
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float:
'''simple docstring'''
if (
not isinstance(SCREAMING_SNAKE_CASE , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('''power_factor must be a valid float value between -1 and 1.''' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
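# Worked numbers for the two formulas above: with apparent power S = 100 VA
# and power factor 0.8, real power P = S * pf = 80 W and reactive power
# Q = S * sqrt(1 - pf**2) = 60 VAR (the classic 3-4-5 power triangle).
import math

_apparent_power, _power_factor = 100.0, 0.8
_real_power = _apparent_power * _power_factor
_reactive_power = _apparent_power * math.sqrt(1 - _power_factor**2)
assert _real_power == 80.0
assert round(_reactive_power, 9) == 60.0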
| 333 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(snake_case__ ):
for j in range(snake_case__ ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[Any] = [[float('''inf''' ) for _ in range(snake_case__ )] for _ in range(snake_case__ )]
for i in range(snake_case__ ):
for j in range(snake_case__ ):
A : List[Any] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(snake_case__ ):
# looping through rows of graph array
for i in range(snake_case__ ):
# looping through columns of graph array
for j in range(snake_case__ ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
A : List[str] = dist[i][k] + dist[k][j]
_print_dist(snake_case__ , snake_case__ )
return dist, v
if __name__ == "__main__":
lowercase : Dict = int(input('Enter number of vertices: '))
lowercase : Union[str, Any] = int(input('Enter number of edges: '))
lowercase : int = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
lowercase : Any = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
lowercase : Union[str, Any] = int(input('Enter source:'))
lowercase : Optional[int] = int(input('Enter destination:'))
lowercase : Tuple = float(input('Enter weight:'))
lowercase : List[str] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
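# Non-interactive version of the example in the comments above: the same
# 3-vertex graph built in code instead of via input(). The relaxation loop is
# inlined here to stay self-contained; the snippet's function should behave
# identically.
_INF = float('inf')
_graph = [
    [0.0, _INF, _INF],
    [_INF, 0.0, 2.0],
    [_INF, 1.0, 0.0],
]
_dist = [row[:] for row in _graph]
for _k in range(3):
    for _i in range(3):
        for _j in range(3):
            if _dist[_i][_k] + _dist[_k][_j] < _dist[_i][_j]:
                _dist[_i][_j] = _dist[_i][_k] + _dist[_k][_j]
assert _dist[1][2] == 2.0 and _dist[2][1] == 1.0 and _dist[0][1] == _INF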
| 3 |
def __a ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )]
A_ : Union[str, Any] = generate_large_matrix()
A_ : Union[str, Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __a ( SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
assert all(row == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for row in grid )
assert all(list(SCREAMING_SNAKE_CASE ) == sorted(SCREAMING_SNAKE_CASE , reverse=SCREAMING_SNAKE_CASE ) for col in zip(*SCREAMING_SNAKE_CASE ) )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(SCREAMING_SNAKE_CASE ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase = (left + right) // 2
__UpperCAmelCase = array[mid]
        # num must be negative and the element just before it must be non-negative.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase = mid + 1
else:
__UpperCAmelCase = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = len(grid[0] )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = find_negative_index(grid[i][:bound] )
total += bound
return (len(SCREAMING_SNAKE_CASE ) * len(grid[0] )) - total
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
for row in grid:
for i, number in enumerate(SCREAMING_SNAKE_CASE ):
if number < 0:
total += len(SCREAMING_SNAKE_CASE ) - i
break
return total
def __a ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase = timeit(f'''{func}(grid=grid)''' , setup=SCREAMING_SNAKE_CASE , number=5_0_0 )
print(f'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
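# A concrete trace of the binary search above on one row of the test grid: in
# the sorted-descending row [4, 3, 2, -1] the first negative value sits at
# index 3, so the row contributes len(row) - 3 = 1 negative number. The
# re-implementation below mirrors the snippet with descriptive names.
def _find_negative_index(array):
    left, right = 0, len(array) - 1
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        if array[mid] < 0 and array[mid - 1] >= 0:
            return mid
        if array[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(array)

assert _find_negative_index([4, 3, 2, -1]) == 3
assert _find_negative_index([1, 0]) == 2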
| 333 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__snake_case =3
def a_ ( lowerCamelCase : int ):
print('Generating primitive root of p' )
while True:
lowerCAmelCase = random.randrange(3 , lowerCamelCase )
if pow(lowerCamelCase , 2 , lowerCamelCase ) == 1:
continue
if pow(lowerCamelCase , lowerCamelCase , lowerCamelCase ) == 1:
continue
return g
def a_ ( lowerCamelCase : int ):
print('Generating prime p...' )
lowerCAmelCase = rabin_miller.generate_large_prime(lowerCamelCase ) # select large prime number.
lowerCAmelCase = primitive_root(lowerCamelCase ) # one primitive root on modulo p.
    lowerCAmelCase = random.randrange(3 , lowerCamelCase ) # private_key -> has to be greater than 2 for safety.
lowerCAmelCase = cryptomath.find_mod_inverse(pow(lowerCamelCase , lowerCamelCase , lowerCamelCase ) , lowerCamelCase )
lowerCAmelCase = (key_size, e_a, e_a, p)
lowerCAmelCase = (key_size, d)
return public_key, private_key
def make_key_files( name : str , key_size : int ):
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('\nWARNING:' )
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main():
    print('Making key files...' )
    make_key_files('elgamal' , 2048 )
    print('Key files generation successful' )
if __name__ == "__main__":
main()
| 4 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ : List[str] = sys.version_info >= (3, 10)
def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class BasicExample:
    '''simple docstring'''

    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample:
    '''simple docstring'''

    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    '''simple docstring'''

    titi = "titi"
    toto = "toto"
class MixedTypeEnum(Enum):
    '''simple docstring'''

    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = BasicEnum(self.foo )
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    '''simple docstring'''

    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={"help": "help message"} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )
@dataclass
class A_ :
'''simple docstring'''
a__ = list_field(default=[] )
a__ = list_field(default=[1, 2, 3] )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
a__ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A_ :
'''simple docstring'''
a__ = field()
a__ = field()
a__ = field()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = BasicEnum(self.required_enum )
@dataclass
class A_ :
'''simple docstring'''
a__ = 42
a__ = field()
a__ = None
a__ = field(default="toto" , metadata={"help": "help message"} )
a__ = list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class A_ :
'''simple docstring'''
a__ = False
a__ = True
a__ = None
@dataclass
class A_ :
'''simple docstring'''
a__ = None
a__ = field(default=_a , metadata={"help": "help message"} )
a__ = None
a__ = list_field(default=[] )
a__ = list_field(default=[] )
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def argparsersEqual(self , a , b ) -> Optional[int]:
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != '''container'''}
            yy = {k: v for k, v in vars(y ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , None ) and yy.get('''choices''' , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](expected_choice ) , yy['''type'''](expected_choice ) )
                del xx["type"], yy["type"]
            self.assertEqual(xx , yy )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
def lowerCAmelCase_ (self ) -> Dict:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def lowerCAmelCase_ (self ) -> str:
@dataclass
class A_ :
'''simple docstring'''
a__ = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
def lowerCAmelCase_ (self ) -> List[str]:
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
            __UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
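# Typical end-to-end flow exercised by the tests above (a minimal sketch,
# assuming the BasicExample dataclass defined earlier):
#
#     parser = HfArgumentParser(BasicExample)
#     (example,) = parser.parse_args_into_dataclasses(
#         ["--foo", "12", "--bar", "3.14", "--baz", "42", "--flag", "True"]
#     )
#     assert example.foo == 12 and example.flag is True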
| 333 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowerCamelCase__ ( PretrainedConfig):
    model_type = '''vit'''
    def __init__(self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , encoder_stride=1_6 , **kwargs , ) -> List[str]:
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
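# With the defaults above, a 224x224 image split into 16x16 patches yields
# (224 // 16) ** 2 = 196 patch embeddings, plus one [CLS] token, for an
# encoder sequence length of 197.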
class lowerCamelCase__ ( OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')
@property
def __A (self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __A (self ) -> float:
return 1e-4
| 5 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def lowerCAmelCase_ (self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix holds the second signal rotated right by i
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
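# For the default signals above, the circular convolution of [2, 1, 2, -1]
# with [1, 2, 3, 4] evaluates to [10.0, 10.0, 6.0, 14.0], i.e.
# y[n] = sum(first_signal[k] * second_signal[(n - k) % 4] for k in range(4)).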
if __name__ == "__main__":
doctest.testmod()
| 333 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( PretrainedConfig ):
    '''simple docstring'''

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ) -> str:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self ) -> int:
        return self.d_model
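# Rough illustration of the defaults above (sketch, not part of the original
# config file): with d_model=1024 and scale_embedding=True, token embeddings
# would be multiplied by sqrt(1024) = 32 before entering the encoder; the
# default scale_embedding=False applies no scaling.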
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
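# With the lazy structure above, the heavy tokenizer modules are only imported
# on first attribute access, e.g. (sketch, assuming this file is the herbert
# subpackage __init__ inside transformers):
#
#     from transformers.models.herbert import HerbertTokenizer  # triggers the lazy import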
| 7 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer(self , **kwargs ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ) -> Tuple:
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ) -> Dict:
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def lowerCAmelCase_ (self ) -> Optional[Any]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCAmelCase_ (self ) -> int:
        tokenizer = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
    def lowerCAmelCase_ (self ) -> int:
        tokenizer = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )

        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )

        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def lowerCAmelCase_ (self ) -> Any:
        tokenizer = self.get_tokenizer()

        sequence = '''Encode this sequence.'''
        space_encoding = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=False )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence , add_special_tokens=False , add_prefix_space=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(first_char , space_encoding )

        tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
        encoded = tokenizer.encode(sequence , add_special_tokens=True )
        first_char = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(first_char , space_encoding )

        # Testing spaces after special tokens
        mask = '''<mask>'''
        tokenizer.add_special_tokens(
            {'''mask_token''': AddedToken(mask , lstrip=True , rstrip=False )} )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask )

        sequence = '''Encode <mask> sequence'''
        sequence_nospace = '''Encode <mask>sequence'''

        encoded = tokenizer.encode(sequence )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(first_char , space_encoding )

        encoded = tokenizer.encode(sequence_nospace )
        mask_loc = encoded.index(mask_ind )
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(first_char , space_encoding )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = '''A, <mask> AllenNLP sentence.'''
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    tokens_p_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
                self.assertSequenceEqual(
                    tokens_r_str , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=True , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''add_prefix_space'''] , add_prefix_space )
            self.assertEqual(post_processor_state['''trim_offsets'''] , trim_offsets )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                text_of_1_token = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ) + 1, len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(text_of_1_token ), len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                text = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #    encoding.offset_mapping[1],
                #    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=True )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ) + 1, 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=True , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name , use_fast=True , add_prefix_space=False , trim_offsets=False )
                encoding = tokenizer_r(text , return_offsets_mapping=True , add_special_tokens=False )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(text_of_1_token )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(text_of_1_token ), 1 + len(text_of_1_token ) + 1 + len(text_of_1_token )) , )
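    # Worked example of the offsets above: for text = "hello hello" the fast
    # tokenizer produces two tokens; with trim_offsets=True the second token
    # maps to (6, 11) (the leading space is trimmed from its span), while with
    # trim_offsets=False it maps to (5, 11).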
| 333 | 0 |
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 200 ):
snake_case_ = [1, 2, 5, 10, 20, 50, 100, 200]
snake_case_ = [0] * (pence + 1)
snake_case_ = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(SCREAMING_SNAKE_CASE__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82 | 8 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self , **kwargs ) -> Tuple:
        config = {'''num_train_timesteps''': 1_000}
        config.update(**kwargs )
        return config
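    # Usage sketch: keyword overrides are merged into the defaults, e.g.
    # self.get_scheduler_config(num_train_timesteps=100) returns
    # {"num_train_timesteps": 100}.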
    def check_over_configs(self , time_step=0 , **config ) -> Any:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
    def check_over_forward(self , time_step=0 , **forward_kwargs ) -> Optional[int]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop(self , **config ) -> List[Any]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample

        return sample
    def lowerCAmelCase_ (self ) -> Optional[Any]:
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]

            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )

            output_a = scheduler.step(residual , time_step_a , sample , **kwargs ).prev_sample
            output_b = scheduler.step(residual , time_step_b , sample , **kwargs ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_b.shape )
def lowerCAmelCase_ (self ) -> List[Any]:
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )

    def lowerCAmelCase_ (self ) -> Union[str, Any]:
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )

    def lowerCAmelCase_ (self ) -> str:
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_mean.item() - 2_540_529 ) < 10
| 333 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester :
    '''simple docstring'''

    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) -> Union[str, Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self ) -> Dict:
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ) -> Tuple:
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict['''input_ids''']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )

        output , past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self ) -> Any:
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )

    def __magic_name__(self ) -> List[Any]:
        self.config_tester.run_common_tests()

    def __magic_name__(self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
    @cached_property
    def tokenizer(self ) -> Optional[Any]:
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def model(self ) -> Optional[Any]:
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected(self , **kwargs ) -> Optional[int]:
        generated_words = self.translate_src_text(**kwargs )
        assert self.expected_text == generated_words

    def translate_src_text(self , **kwargs ) -> Union[str, Any]:
        model_inputs = self.tokenizer(self.src_text , **kwargs , padding=True , return_tensors='''tf''' )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
@slow
def __magic_name__( self :Tuple ) -> int:
self._assert_generated_batch_equal_expected()
| 9 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester :
    '''simple docstring'''

    def __init__(self , parent , batch_size=13 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=224 , num_labels=1_000 , layer_depths=[3, 3, 6, 4] , embed_dims=[48, 56, 112, 220] , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self ) -> str:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ) -> Optional[Any]:
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1E-5 , )
    def create_and_check_model(self , config , pixel_values , labels ) -> int:
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
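    # Shape rationale (sketch): SwiftFormer downsamples by 4 in the patch
    # embedding and by 2 in each of the three later stages, so a 224x224 input
    # ends at 224 / 32 = 7, giving a (batch, embed_dims[-1], 7, 7) feature map.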
    def create_and_check_for_image_classification(self , config , pixel_values , labels ) -> List[Any]:
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ) -> Optional[int]:
        (config , pixel_values , labels) = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    has_attentions = False
    def setUp(self ) -> List[str]:
        self.model_tester = SwiftFormerModelTester(self )
        self.config_tester = ConfigTester(
            self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
    def lowerCAmelCase_ (self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def lowerCAmelCase_ (self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def lowerCAmelCase_ (self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def lowerCAmelCase_ (self ) -> Optional[int]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowerCAmelCase_ (self ) -> Any:
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
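    # Shape intuition for the check above (a sketch, not part of the upstream
    # test): SwiftFormer returns 8 hidden states, two per stage, and each stage
    # halves the post-stem resolution, so the side length is
    # (image_size // 4) // 2 ** (i // 2), e.g. 56, 56, 28, 28, 14, 14, 7, 7
    # for image_size=224.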
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def __a ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase_ (self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 333 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] =parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the HuggingFace Datasets folder.")
train_parser.set_defaults(func=UpperCAmelCase_)
def __init__(self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: str =get_logger("datasets-cli/converting")
lowerCamelCase__: Tuple =tfds_path
lowerCamelCase__: Union[str, Any] =datasets_directory
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
if os.path.isdir(self._tfds_path):
lowerCamelCase__: Dict =os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
lowerCamelCase__: Any =os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
lowerCamelCase__: Any =os.path.abspath(self._datasets_directory)
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""")
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Optional[int] =[]
lowerCamelCase__: int ={}
if os.path.isdir(self._tfds_path):
lowerCamelCase__: Tuple =os.listdir(UpperCAmelCase_)
else:
lowerCamelCase__: int =[os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""")
lowerCamelCase__: Tuple =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
if not os.path.isfile(UpperCAmelCase_) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(UpperCAmelCase_ , encoding="utf-8") as f:
lowerCamelCase__: Union[str, Any] =f.readlines()
lowerCamelCase__: int =[]
lowerCamelCase__: Any =False
lowerCamelCase__: int =False
lowerCamelCase__: int =[]
for line in lines:
lowerCamelCase__: List[Any] =line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowerCamelCase__: List[Any] ="import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
lowerCamelCase__: Union[str, Any] =""
continue
elif "from absl import logging" in out_line:
lowerCamelCase__: Tuple ="from datasets import logging\n"
elif "getLogger" in out_line:
lowerCamelCase__: List[str] =out_line.replace("getLogger" , "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
lowerCamelCase__: str =True
lowerCamelCase__: List[Any] =list(filter(lambda UpperCAmelCase_: e in out_line , UpperCAmelCase_))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase_) + "\n")
out_lines.append(UpperCAmelCase_)
out_lines.append(UpperCAmelCase_)
continue
else:
for pattern, replacement in TO_CONVERT:
lowerCamelCase__: Dict =re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowerCamelCase__: Any =re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , UpperCAmelCase_)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
lowerCamelCase__: Any ="from . import " + match.group(1)
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowerCamelCase__: Optional[int] =True
out_lines.append(UpperCAmelCase_)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowerCamelCase__: Tuple =f_name.replace(".py" , "")
lowerCamelCase__: Optional[int] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
self._logger.info(F"""Adding directory {output_dir}""")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(UpperCAmelCase_)
if needs_manual_update:
with_manual_update.append(UpperCAmelCase_)
with open(UpperCAmelCase_ , "w" , encoding="utf-8") as f:
f.writelines(UpperCAmelCase_)
self._logger.info(F"""Converted in {output_file}""")
for utils_file in utils_files:
try:
lowerCamelCase__: Union[str, Any] =os.path.basename(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =imports_to_builder_map[f_name.replace(".py" , "")]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""")
shutil.copy(UpperCAmelCase_ , UpperCAmelCase_)
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""")
| 10 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : str = logging.get_logger(__name__)
A_ : str = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
A_ : Optional[int] = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
A_ : Dict = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
A_ : int = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
A_ : Tuple = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
A_ : Tuple = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
A_ : int = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
A_ : Tuple = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
A_ : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModel)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING
A_ : str = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
A_ : Optional[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING
A_ : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Dict = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
A_ : Any = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A_ : int = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class A_ ( _BaseAutoModelClass ):
'''simple docstring'''
a__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
A_ : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
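# Usage sketch (hedged; relies on the public transformers API that the classes
# above mirror, where they are exported as FlaxAutoModel and friends):
#
#     from transformers import FlaxAutoModel, FLAX_MODEL_MAPPING, BertConfig
#     FLAX_MODEL_MAPPING[BertConfig]                       # -> FlaxBertModel
#     model = FlaxAutoModel.from_pretrained("bert-base-cased")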
| 333 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['ConvNextFeatureExtractor']
lowerCAmelCase__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
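# Note (sketch): because of _LazyModule, `import transformers.models.convnext`
# stays cheap; the torch/TF submodules registered above are only imported the
# first time one of their attributes (e.g. ConvNextModel) is accessed.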
| 11 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
A_ : Tuple = logging.get_logger(__name__)
class A_ ( _a ):
'''simple docstring'''
a__ = "linear"
a__ = "cosine"
a__ = "cosine_with_restarts"
a__ = "polynomial"
a__ = "constant"
a__ = "constant_with_warmup"
a__ = "piecewise_constant"
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Tuple:
'''simple docstring'''
return LambdaLR(SCREAMING_SNAKE_CASE , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> Union[str, Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1.0 , SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = {}
__UpperCAmelCase = step_rules.split(''',''' )
for rule_str in rule_list[:-1]:
__UpperCAmelCase , __UpperCAmelCase = rule_str.split(''':''' )
__UpperCAmelCase = int(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = float(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = value
__UpperCAmelCase = float(rule_list[-1] )
def create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
def rule_func(SCREAMING_SNAKE_CASE ) -> float:
__UpperCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCAmelCase = create_rules_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=-1 ) -> Optional[Any]:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0.5 , SCREAMING_SNAKE_CASE = -1 ) -> int:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1e-7 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=-1 ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = optimizer.defaults['''lr''']
if not (lr_init > lr_end):
        raise ValueError(f'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(SCREAMING_SNAKE_CASE ):
if current_step < num_warmup_steps:
return float(SCREAMING_SNAKE_CASE ) / float(max(1 , SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCAmelCase = lr_init - lr_end
__UpperCAmelCase = num_training_steps - num_warmup_steps
__UpperCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = -1 , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = SchedulerType(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(SCREAMING_SNAKE_CASE , step_rules=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , num_cycles=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , power=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE , )
return schedule_func(
SCREAMING_SNAKE_CASE , num_warmup_steps=SCREAMING_SNAKE_CASE , num_training_steps=SCREAMING_SNAKE_CASE , last_epoch=SCREAMING_SNAKE_CASE )
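# Usage sketch against the upstream diffusers API this file mirrors (the
# dispatcher defined last above is `get_scheduler` upstream; names below are
# illustrative):
#
#     import torch
#     optimizer = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(2))], lr=1e-3)
#     scheduler = get_scheduler(
#         "linear", optimizer, num_warmup_steps=100, num_training_steps=1_000
#     )
#     for _ in range(1_000):
#         optimizer.step()
#         scheduler.step()  # lr warms up linearly for 100 steps, then decays to 0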
| 333 | 0 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = ['pixel_values']
def __init__( self: Any , UpperCamelCase_: bool = True , UpperCamelCase_: Union[int, float] = 1 / 2_55 , UpperCamelCase_: bool = True , UpperCamelCase_: int = 8 , **UpperCamelCase_: Tuple , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = do_rescale
__lowerCamelCase = rescale_factor
__lowerCamelCase = do_pad
__lowerCamelCase = pad_size
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: np.ndarray , UpperCamelCase_: float , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_: Tuple ):
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: np.ndarray , UpperCamelCase_: int , UpperCamelCase_: Optional[Union[str, ChannelDimension]] = None ):
__lowerCamelCase, __lowerCamelCase = get_image_size(UpperCamelCase_ )
__lowerCamelCase = (old_height // size + 1) * size - old_height
__lowerCamelCase = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase_ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase_ )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: ImageInput , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[float] = None , UpperCamelCase_: Optional[bool] = None , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Union[str, TensorType]] = None , UpperCamelCase_: Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase_: Any , ):
__lowerCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCamelCase = do_pad if do_pad is not None else self.do_pad
__lowerCamelCase = pad_size if pad_size is not None else self.pad_size
__lowerCamelCase = make_list_of_images(UpperCamelCase_ )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
__lowerCamelCase = [to_numpy_array(UpperCamelCase_ ) for image in images]
if do_rescale:
__lowerCamelCase = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images]
if do_pad:
__lowerCamelCase = [self.pad(UpperCamelCase_ , size=UpperCamelCase_ ) for image in images]
__lowerCamelCase = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images]
__lowerCamelCase = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
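# Quick check of the padding arithmetic in `pad` above: each spatial dimension
# grows to the next multiple of `size`, strictly larger even when it already
# divides evenly (that is what the `// size + 1` buys):
#
#     old_height, size = 21, 8
#     pad_height = (old_height // size + 1) * size - old_height   # -> 3
#     assert (old_height + pad_height) % size == 0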
| 12 |
def __a(x_points: list, y_points: list, xa: float) -> list:
    """Interpolate the value at ``xa`` from (x_points, y_points) via Neville's scheme."""
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
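    # Usage sketch for the routine above (upstream this is TheAlgorithms'
    # neville_interpolate; the sample data lie on y = x + 5, so the fit is exact):
    #
    #     value, table = __a([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    #     print(value)   # 10.0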
| 333 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
SCREAMING_SNAKE_CASE_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
SCREAMING_SNAKE_CASE_: Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(self.tmpdirname , lowerCAmelCase__)
with open(self.image_processor_file , "w" , encoding="utf-8") as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowerCAmelCase__ : Optional[int]):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase__ : str):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowerCAmelCase__ : Tuple):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
SCREAMING_SNAKE_CASE_: Optional[int] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1)) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Optional[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
processor_slow.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_: Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
processor_fast.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_: Tuple = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__)
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__)
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)")
SCREAMING_SNAKE_CASE_: List[str] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0)
SCREAMING_SNAKE_CASE_: Dict = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(lowerCAmelCase__ , return_tensors="np")
SCREAMING_SNAKE_CASE_: int = processor(images=lowerCAmelCase__ , return_tensors="np")
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Any = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = "lower newer"
SCREAMING_SNAKE_CASE_: Tuple = processor(text=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = tokenizer(lowerCAmelCase__ , padding="max_length" , max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Dict = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = "lower newer"
SCREAMING_SNAKE_CASE_: Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: Optional[int] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_: int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Optional[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_: Optional[Any] = processor.batch_decode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Any = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = "lower newer"
SCREAMING_SNAKE_CASE_: str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_: int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
| 13 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover greedily via a maximal matching."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)
    # While there are still edges, take an arbitrary edge (from_node, to_node),
    # add both endpoints to chosen_vertices, and then remove every edge
    # adjacent to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edges of the adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''detr'''
UpperCAmelCase__ = ['''past_key_values''']
UpperCAmelCase__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , UpperCAmelCase__ : str=True , UpperCAmelCase__ : int=None , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : List[str]=100 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : Any=2_048 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Any=2_048 , UpperCAmelCase__ : Any=8 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : List[str]=0.0 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Optional[Any]="relu" , UpperCAmelCase__ : int=256 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Optional[int]=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Optional[int]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : Optional[int]="sine" , UpperCAmelCase__ : Union[str, Any]="resnet50" , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : Dict=2 , UpperCAmelCase__ : Optional[Any]=1 , UpperCAmelCase__ : Dict=1 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : int=2 , UpperCAmelCase__ : str=0.1 , **UpperCAmelCase__ : str , ) ->List[Any]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
A__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = backbone_config.get('''model_type''')
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(UpperCAmelCase__)
# set timm attributes to None
A__ , A__ , A__ = None, None, None
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__)
@property
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
return self.d_model
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Optional[Any]) ->str:
'''simple docstring'''
return cls(backbone_config=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Dict[str, any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = version.parse('''1.11''' )
@property
def SCREAMING_SNAKE_CASE ( self : str) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
return 12
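# Usage sketch against the upstream transformers API that this file mirrors
# (the two classes above are DetrConfig and DetrOnnxConfig upstream):
#
#     from transformers import DetrConfig
#     config = DetrConfig(use_timm_backbone=False)   # defaults to a ResNet stage4 backbone
#     print(config.to_dict()["backbone_config"]["model_type"])   # resnet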
| 14 |
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort):
    """Depth-first topological sort of the module-level DAG, starting at ``start``."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
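    # For the graph above this prints the DFS postorder ['c', 'd', 'e', 'b', 'a'];
    # reversed, it yields a conventional topological order ['a', 'b', 'e', 'd', 'c'].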
| 333 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
__A = tempfile.mkdtemp()
__A = BlipImageProcessor()
__A = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
__A = BlipProcessor(A ,A )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Union[str, Any] ,**A : int ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).tokenizer
def UpperCamelCase_ ( self : List[str] ,**A : Optional[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**A ).image_processor
def UpperCamelCase_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : int ):
__A = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
__A = [Image.fromarray(np.moveaxis(A ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : List[Any] ):
__A = BlipProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
__A = self.get_image_processor(do_normalize=A ,padding_value=1.0 )
__A = BlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=A ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,A )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipProcessor(tokenizer=A ,image_processor=A )
__A = self.prepare_image_inputs()
__A = image_processor(A ,return_tensors="np" )
__A = processor(images=A ,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = processor(text=A )
__A = tokenizer(A ,return_token_type_ids=A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipProcessor(tokenizer=A ,image_processor=A )
__A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A = processor.batch_decode(A )
__A = tokenizer.batch_decode(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.get_image_processor()
__A = self.get_tokenizer()
__A = BlipProcessor(tokenizer=A ,image_processor=A )
__A = "lower newer"
__A = self.prepare_image_inputs()
__A = processor(text=A ,images=A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) ,["pixel_values", "input_ids", "attention_mask"] )
| 15 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
'''simple docstring'''
def __init__( self : int ,_snake_case : Union[str, Any] ,_snake_case : Tuple=12 ,_snake_case : Optional[int]=7 ,_snake_case : List[Any]=True ,_snake_case : int=True ,_snake_case : Optional[int]=True ,_snake_case : Union[str, Any]=99 ,_snake_case : int=32 ,_snake_case : int=32 ,_snake_case : List[Any]=2 ,_snake_case : int=4 ,_snake_case : Union[str, Any]=37 ,_snake_case : Dict=0.1 ,_snake_case : int=0.1 ,_snake_case : int=512 ,_snake_case : Tuple=0.02 ,_snake_case : Optional[Any]=0 ,_snake_case : Union[str, Any]=None ,) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = parent
lowercase__ : str = batch_size
lowercase__ : Optional[int] = seq_length
lowercase__ : Optional[Any] = is_training
lowercase__ : Dict = use_input_mask
lowercase__ : Union[str, Any] = use_labels
lowercase__ : str = vocab_size
lowercase__ : Optional[int] = hidden_size
lowercase__ : Optional[Any] = projection_dim
lowercase__ : str = num_hidden_layers
lowercase__ : Any = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = dropout
lowercase__ : Optional[int] = attention_dropout
lowercase__ : List[str] = max_position_embeddings
lowercase__ : Tuple = initializer_range
lowercase__ : Union[str, Any] = scope
lowercase__ : Union[str, Any] = bos_token_id
def UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowercase__ : Any = None
if self.use_input_mask:
lowercase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
lowercase__ : Any = input_mask.numpy()
lowercase__ , lowercase__ : List[str] = input_mask.shape
lowercase__ : Tuple = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
for batch_idx, start_index in enumerate(_snake_case ):
lowercase__ : Union[str, Any] = 1
lowercase__ : Any = 0
lowercase__ : str = self.get_config()
return config, input_ids, tf.convert_to_tensor(_snake_case )
def UpperCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return BlipTextConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : Dict ,_snake_case : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[Any] = TFBlipTextModel(config=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,attention_mask=_snake_case ,training=_snake_case )
lowercase__ : List[Any] = model(_snake_case ,training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs
lowercase__ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __A ( A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Union[str, Any] = False
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : Optional[Any] = BlipTextModelTester(self )
lowercase__ : List[Any] = ConfigTester(self ,config_class=_snake_case ,hidden_size=37 )
def UpperCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
def UpperCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCAmelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
pass
@slow
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = TFBlipTextModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : List[str]=True ) -> Optional[int]:
"""simple docstring"""
super().test_pt_tf_model_equivalence(allow_missing_keys=_snake_case )
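# The class above follows the standard transformers tester pattern: a helper object builds a
# tiny config plus random inputs, and the mixin-driven test class reuses it for every shared
# check. Hedged sketch of the intended flow (using the conventional names these obfuscated
# defs correspond to; not runnable against the renamed defs as-is):
#
#   tester = BlipTextModelTester(parent=self)
#   config, input_ids, input_mask = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, input_ids, input_mask)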
| 16 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
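# For reference, `Accelerator.no_sync` mirrors torch DDP's `no_sync`: gradients accumulate
# locally and the cross-process all-reduce is skipped until a backward pass runs outside the
# context. Minimal hedged sketch (model and batch assumed already prepared):
#
#   with accelerator.no_sync(ddp_model):
#       accelerator.backward(ddp_model(batch).sum())  # grads stay local
#   accelerator.backward(ddp_model(batch).sum())      # this backward triggers the sync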
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> Dict:
'''simple docstring'''
model.train()
__UpperCAmelCase = model(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = F.mse_loss(SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
set_seed(4_2 )
__UpperCAmelCase = RegressionModel()
__UpperCAmelCase = deepcopy(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
model.to(accelerator.device )
if sched:
__UpperCAmelCase = AdamW(params=model.parameters() , lr=1e-3 )
__UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
__UpperCAmelCase = LambdaLR(SCREAMING_SNAKE_CASE , lr_lambda=lambda SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
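# Note: the `deepcopy` above guarantees the copy handed to `accelerator.prepare` starts from
# identical weights, so any later divergence between the two models is attributable purely to
# gradient-sync behavior rather than initialization.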
def __a ( SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
# Test when on a single CPU or GPU that the context manager does nothing
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
# Use a single batch
__UpperCAmelCase , __UpperCAmelCase = next(iter(SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
__UpperCAmelCase = ddp_input[torch.randperm(len(SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def __a ( SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = Accelerator(
split_batches=SCREAMING_SNAKE_CASE , dispatch_batches=SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_training_setup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase , __UpperCAmelCase = batch.values()
# Gather the distributed inputs and targs for the base model
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) )
__UpperCAmelCase , __UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
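        # The loop above advances the baseline scheduler once per process (or once in total
        # when `split_batches=True`) to mirror how the prepared `ddp_sched` is stepped under
        # data parallelism.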
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(SCREAMING_SNAKE_CASE ):
step_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
__UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_3_3_7 + iteration )
GradientState._reset_state()
def __a ( ) -> str:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = RegressionDataset(length=8_0 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase = RegressionDataset(length=9_6 )
__UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE , batch_size=1_6 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if iteration < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(SCREAMING_SNAKE_CASE )
if batch_num < len(SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __a ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = Accelerator()
__UpperCAmelCase = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 333 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__lowercase = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__, cache_dir=UpperCAmelCase__ )
__lowercase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__, os.listdir(UpperCAmelCase__ )[0], "snapshots" ) )]
__lowercase = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 4
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1
__lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase__ ) == num_samples
def _lowercase ( self : int ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__ )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa )
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1
def _lowercase ( self : Optional[int] ):
__lowercase = FlaxDDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=UpperCAmelCase__, steps_offset=1, )
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, )
__lowercase = scheduler.create_state()
        params["scheduler"] = scheduler_state
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.random.PRNGKey(0 )
__lowercase = 5_0
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1
def _lowercase ( self : Dict ):
__lowercase = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__lowercase = jax.device_count()
__lowercase = num_samples * [prompt]
__lowercase = jax.random.split(jax.random.PRNGKey(0 ), UpperCAmelCase__ )
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, )
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
__lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1]
# With memory efficient attention
__lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, use_memory_efficient_attention=UpperCAmelCase__, )
__lowercase = replicate(UpperCAmelCase__ )
__lowercase = pipeline.prepare_inputs(UpperCAmelCase__ )
__lowercase = shard(UpperCAmelCase__ )
__lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
        __lowercase = images_eff[2, 0, 2_5_6, 1_0:1_7, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
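# Every test above follows the same multi-device recipe: replicate the params across the
# local devices, split one PRNGKey per device, shard the tokenized prompts, then call the
# pipeline with jit=True so the sampling loop is pmapped. Hedged sketch with the conventional
# (deobfuscated) names:
#
#   prompt_ids = pipeline.prepare_inputs(num_samples * [prompt])
#   p_params = replicate(params)
#   rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   images = pipeline(shard(prompt_ids), p_params, rng, jit=True).images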
| 17 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Optional[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
A_ : Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
A_ : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
A_ : Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
A_ : Union[str, Any] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
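# Because `sys.exit` receives the count of offending files, the process exit status equals
# the number of problems found, so a CI step can run this script and fail on any non-zero
# return code.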
| 333 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = LEDTokenizer
A = LEDTokenizerFast
A = True
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE_ : Tuple = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(_A ) )
def __UpperCamelCase ( self : Tuple,**_A : List[str] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : str,**_A : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : Tuple,_A : Any ):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : List[Any] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(_A,max_length=len(_A ),padding=_A,return_tensors="pt" )
self.assertIsInstance(_A,_A )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_A,_A )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : int = tokenizer(_A,padding=_A,return_tensors="pt" )
self.assertIn("input_ids",_A )
self.assertIn("attention_mask",_A )
self.assertNotIn("labels",_A )
self.assertNotIn("decoder_attention_mask",_A )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(text_target=_A,max_length=32,padding="max_length",return_tensors="pt" )
self.assertEqual(32,targets["input_ids"].shape[1] )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(
["I am a small frog" * 1024, "I am a small frog"],padding=_A,truncation=_A,return_tensors="pt" )
self.assertIsInstance(_A,_A )
self.assertEqual(batch.input_ids.shape,(2, 5122) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["A long paragraph for summarization."]
SCREAMING_SNAKE_CASE_ : str = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(text_target=_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Optional[int] = inputs["input_ids"]
SCREAMING_SNAKE_CASE_ : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : str = ["Summary of the text.", "Another summary."]
SCREAMING_SNAKE_CASE_ : Optional[int] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_A,padding=_A )
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.pad(_A )
self.assertSequenceEqual(outputs["global_attention_mask"],_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A,**_A )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class.from_pretrained(_A,**_A )
SCREAMING_SNAKE_CASE_ : Any = "A, <mask> AllenNLP sentence."
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.encode_plus(_A,add_special_tokens=_A,return_token_type_ids=_A )
SCREAMING_SNAKE_CASE_ : Any = tokenizer_p.encode_plus(_A,add_special_tokens=_A,return_token_type_ids=_A )
self.assertEqual(sum(tokens_r["token_type_ids"] ),sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ),sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ),)
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
SCREAMING_SNAKE_CASE_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"],[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"],[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_A,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 18 |
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )]
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1 or len(SCREAMING_SNAKE_CASE ) <= key:
return input_string
for position, character in enumerate(SCREAMING_SNAKE_CASE ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = [''''''.join(SCREAMING_SNAKE_CASE ) for row in temp_grid]
__UpperCAmelCase = ''''''.join(SCREAMING_SNAKE_CASE )
return output_string
def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
'''simple docstring'''
__UpperCAmelCase = []
__UpperCAmelCase = key - 1
if key <= 0:
raise ValueError('''Height of grid can\'t be 0 or negative''' )
if key == 1:
return input_string
__UpperCAmelCase = [[] for _ in range(SCREAMING_SNAKE_CASE )] # generates template
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append('''*''' )
__UpperCAmelCase = 0
for row in temp_grid: # fills in the characters
__UpperCAmelCase = input_string[counter : counter + len(SCREAMING_SNAKE_CASE )]
grid.append(list(SCREAMING_SNAKE_CASE ) )
counter += len(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = '''''' # reads as zigzag
for position in range(len(SCREAMING_SNAKE_CASE ) ):
__UpperCAmelCase = position % (lowest * 2) # puts it in bounds
__UpperCAmelCase = min(SCREAMING_SNAKE_CASE , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __a ( SCREAMING_SNAKE_CASE ) -> dict[int, str]:
'''simple docstring'''
__UpperCAmelCase = {}
for key_guess in range(1 , len(SCREAMING_SNAKE_CASE ) ): # tries every key
__UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
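# Worked example of the zigzag traversal all three functions above rely on. Standalone and
# runnable (nothing from the renamed defs is referenced): for key=4 the rail index cycles
# 0,1,2,3,2,1, so encrypting "Hello World" reads the rails as "HW" + "e o" + "lord" + "ll",
# i.e. 'HWe olordll', and decrypting that string with the same key restores "Hello World".
if __name__ == "__main__":
    key = 4
    lowest = key - 1
    rails = [min(pos % (lowest * 2), lowest * 2 - pos % (lowest * 2)) for pos in range(12)]
    print(rails)  # [0, 1, 2, 3, 2, 1, 0, 1, 2, 3, 2, 1]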
| 333 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A =logging.get_logger(__name__)
__A ={
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__A ={
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
__A ={'''facebook/blenderbot-3B''': 1_2_8}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BlenderbotTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> Any:
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowercase ) != add_prefix_space:
lowerCamelCase_ = getattr(lowercase , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
lowerCamelCase_ = "post_processor"
lowerCamelCase_ = getattr(self.backend_tokenizer , lowercase , lowercase )
if tokenizer_component_instance:
lowerCamelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
                state["sep"] = tuple(state["sep"])
if "cls" in state:
                state["cls"] = tuple(state["cls"])
lowerCamelCase_ = False
if state.get("add_prefix_space" , lowercase ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
lowerCamelCase_ = True
if state.get("trim_offsets" , lowercase ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
lowerCamelCase_ = True
if changes_to_apply:
lowerCamelCase_ = getattr(lowercase , state.pop("type" ) )
lowerCamelCase_ = component_class(**lowercase )
setattr(self.backend_tokenizer , lowercase , lowercase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE_( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]:
lowerCamelCase_ = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else value
lowerCamelCase_ = value
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> BatchEncoding:
lowerCamelCase_ = kwargs.get("is_split_into_words" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , *lowercase , **lowercase ) -> BatchEncoding:
lowerCamelCase_ = kwargs.get("is_split_into_words" , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
lowerCamelCase_ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> int:
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[int]:
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(lowercase )
lowerCamelCase_ = " ".join(lowercase )
lowerCamelCase_ = self.encode(lowercase )
if len(lowercase ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
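# `_build_conversation_input_ids` above encodes Blenderbot's convention: user turns get a
# leading space (matching how the training data was formatted), generated turns are appended
# as-is, the joined string is encoded once, and the ids are left-truncated to
# `model_max_length` so the most recent context is kept.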
| 19 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
a__ = StableUnCLIPPipeline
a__ = TEXT_TO_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_BATCH_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
a__ = False
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = 32
__UpperCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0 )
__UpperCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL()
__UpperCAmelCase = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def lowerCAmelCase_ (self , lowercase__ , lowercase__=0 ) -> List[Any]:
if str(lowercase__ ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
__UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__ )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ (self ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ (self ) -> Union[str, Any]:
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = pipe('''anime turle''' , generator=lowercase__ , output_type='''np''' )
__UpperCAmelCase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
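# Both slow tests lean on the same two memory levers: `enable_attention_slicing` (attention
# computed in chunks) and `enable_sequential_cpu_offload` (weights streamed to the GPU module
# by module), which together keep peak VRAM under the asserted 7 GB bound.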
| 333 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __snake_case ( unittest.TestCase ):
_a : Optional[int]= MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ):
'''simple docstring'''
lowercase : Any = hf_hub_download(
repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
lowercase : List[str] = VideoClassificationPipeline(model=snake_case ,image_processor=snake_case ,top_k=2 )
lowercase : Dict = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ):
'''simple docstring'''
for example in examples:
lowercase : int = video_classifier(snake_case )
self.assertEqual(
snake_case ,[
{"""score""": ANY(snake_case ), """label""": ANY(snake_case )},
{"""score""": ANY(snake_case ), """label""": ANY(snake_case )},
] ,)
@require_torch
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase : str = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} ,crop_size={"""height""": 10, """width""": 10} )
lowercase : List[Any] = pipeline(
"""video-classification""" ,model=snake_case ,feature_extractor=snake_case ,frame_sampling_rate=4 )
lowercase : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" ,filename="""archery.mp4""" ,repo_type="""dataset""" )
lowercase : Any = video_classifier(snake_case ,top_k=2 )
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] ,)
lowercase : str = video_classifier(
[
video_file_path,
video_file_path,
] ,top_k=2 ,)
self.assertEqual(
nested_simplify(snake_case ,decimals=4 ) ,[
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] ,)
@require_tf
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass
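# Hedged sketch of the pipeline call pattern exercised above (checkpoint name taken from the
# test; output shape illustrative):
#
#   classifier = pipeline(
#       "video-classification",
#       model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#   )
#   preds = classifier(video_file_path, top_k=2)  # -> [{"score": ..., "label": ...}, ...]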
| 20 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
A_ : str = {'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class A_ ( _a ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = ["input_ids", "attention_mask"]
a__ = None
def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ) -> Dict:
super().__init__(
lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , )
__UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
__UpperCAmelCase = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._batch_encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , *lowercase__ , **lowercase__ ) -> BatchEncoding:
__UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowercase__ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'''
''' pretokenized inputs.''' )
return super()._encode_plus(*lowercase__ , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
__UpperCAmelCase = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> List[int]:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )
if len(lowercase__ ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
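# With `add_prefix_space=False` (the default here), both encode paths above raise on
# `is_split_into_words=True`: the byte-level BPE relies on the leading-space marker to
# tokenize pre-split words consistently.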
| 333 | 0 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class _lowerCamelCase( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)])
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
_lowercase : Tuple = GenerationConfig(
do_sample=lowerCamelCase, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase, config_name=lowerCamelCase)
_lowercase : List[str] = GenerationConfig.from_pretrained(lowerCamelCase, config_name=lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample, lowerCamelCase)
self.assertEqual(loaded_config.temperature, 0.7)
self.assertEqual(loaded_config.length_penalty, 1.0)
self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k, 50)
self.assertEqual(loaded_config.max_length, 20)
self.assertEqual(loaded_config.max_time, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = AutoConfig.from_pretrained('gpt2')
_lowercase : List[str] = GenerationConfig.from_model_config(lowerCamelCase)
_lowercase : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase, lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Union[str, Any] = GenerationConfig()
_lowercase : List[Any] = {
'max_new_tokens': 10_24,
'foo': 'bar',
}
_lowercase : Any = copy.deepcopy(lowerCamelCase)
_lowercase : List[Any] = generation_config.update(**lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase, lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens, 10_24)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase, {'foo': 'bar'})
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[Any] = GenerationConfig()
        generation_config.foo = 'bar'
with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
generation_config.save_pretrained(lowerCamelCase)
_lowercase : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo, 'bar')
_lowercase : int = GenerationConfig.from_model_config(lowerCamelCase)
assert not hasattr(lowerCamelCase, 'foo') # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 21 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Return the minimum count of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
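# Worked examples (easy to verify by hand): 21 = 16 + 4 + 1 -> 3 squares;
# 37 = 36 + 1 -> 2 squares; 25 -> 1 square.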
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
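    # Illustrative example: with overflow_to_sample_mapping == [0, 0, 1], image 0 is
    # duplicated so each overflowing chunk of sample 0 keeps its source image.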
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 22 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
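# Quick sanity check (hypothetical two-color palette): a black pixel maps to cluster 0:
# color_quantize(np.zeros((1, 1, 3)), np.array([[0, 0, 0], [255, 255, 255]])) -> array([0])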
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, clusters=None, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_normalize=True, do_color_quantize=True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image, data_format=None, ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_normalize=None, do_color_quantize=None, clusters=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_normalize:
            images = [self.normalize(image=image) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
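# Minimal usage sketch (assumes `palette` is a hypothetical (n_clusters, 3) array of RGB centroids):
# processor = ImageGPTImageProcessor(clusters=palette)
# encoding = processor(images=pil_image, return_tensors="np")  # color-quantized encoding["input_ids"]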
| 333 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__: List[str] = ["GLPNFeatureExtractor"]
UpperCamelCase__: Tuple = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
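# With the lazy structure above, `from transformers.models.glpn import GLPNModel`
# only triggers the torch-dependent import when the attribute is first accessed.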
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096, d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16, expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False, router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False, output_hidden_states=False, output_attentions=False, initializer_factor=0.002, output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995, eos_token_id=35999, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs, )
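# Minimal usage sketch (illustrative values, not a released checkpoint):
# config = GPTSanJapaneseConfig(num_switch_layers=2, num_ext_layers=1)
# assert config.num_layers == 3  # derived in __init__ as switch + ext layers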
| 24 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power (W) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power (VAR) from apparent power (VA) and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
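# Worked example: apparent_power=100 VA at power_factor=0.8 gives
# real_power -> 80.0 W and reactive_power -> ~60.0 VAR (since sqrt(1 - 0.8**2) = 0.6).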
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
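    # Note: disabling `enable_mem_pattern` is a common ONNX Runtime setting when input
    # shapes vary between runs; the provider dict above caps the CUDA arena at ~15 GB.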
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png")
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 25 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
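# Worked example: grid = [[3, 1, -1], [1, -2, -3]] contains 3 negatives; the
# binary-search variant finds them in O(rows * log(cols)) rather than O(rows * cols).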
def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 333 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
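# Example renames produced by the chained replacements above (illustrative keys):
# "backbone.cls_token"                 -> "vit.embeddings.cls_token"
# "backbone.blocks.0.attn.proj.weight" -> "vit.encoder.layer.0.attention.output.dense.weight"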
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_yolos_config(yolos_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")
    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 26 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ : List[str] = sys.version_info >= (3, 10)
def __a ( SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)
        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))
            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))
            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))
            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))
            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=["titi", "toto", 42], type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)
        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)
        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)
        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo", default="toto", choices=("titi", "toto", 42), type=make_choice_type_function(["titi", "toto", 42]), )
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)
        args = parser.parse_args([])
        self.assertEqual(
            args, Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]), )
        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)
        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)
        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)
            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))
            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum", type=make_choice_type_function(["titi", "toto"]), choices=["titi", "toto"], required=True, )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)
        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]
        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)
        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]
        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 333 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'conditional_pixel_values'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
| 27 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
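# Worked example: CircularConvolution().circular_convolution() on the default
# signals [2, 1, 2, -1] and [1, 2, 3, 4] returns [10, 10, 6, 14].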
if __name__ == "__main__":
doctest.testmod()
| 333 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
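

if __name__ == "__main__":
    # Illustrative sketch (added for exposition; the module's relative imports
    # mean it only runs inside its package): preprocess a random 8-frame video
    # with the defaults (shortest-edge-224 resize, 224x224 center crop).
    video = [np.random.randint(0, 256, size=(240, 320, 3), dtype=np.uint8) for _ in range(8)]
    image_processor = VideoMAEImageProcessor()
    batch = image_processor.preprocess(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)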
| 28 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
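

if __name__ == "__main__":
    # Minimal sketch (added for exposition; the module's relative imports mean
    # it only runs inside its package): the attribute_map lets generic code
    # read `num_attention_heads` and `hidden_size` via the encoder fields.
    config = PegasusConfig(encoder_attention_heads=8, d_model=512)
    print(config.num_attention_heads, config.hidden_size)  # -> 8 512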
| 333 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
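

if __name__ == "__main__":
    # Minimal sketch (added for exposition): with the defaults the rotary
    # embeddings span the full head dimension, since `rotary_pct` is 1.0.
    config = GPTNeoXJapaneseConfig()
    head_size = config.hidden_size // config.num_attention_heads
    print(int(head_size * config.rotary_pct))  # -> 80, the rotary dim per head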
| 29 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
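    # Note (sketch): the four `from_pretrained` variants above exercise every
    # (add_prefix_space, trim_offsets) combination; with add_prefix_space=True
    # the leading space of " {token}" belongs to the following token, and
    # trim_offsets=True drops that space from the reported character span.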
| 333 | 0 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
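    # Worked example (sketch): insert_next bubbles an out-of-place element
    # forward one swap at a time, so [3, 1, 2] sorts to [1, 2, 3].
    demo = [3, 1, 2]
    rec_insertion_sort(demo, len(demo))
    assert demo == [1, 2, 3]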
| 30 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1_000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
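

if __name__ == "__main__":
    # Standalone sketch (added for exposition, not part of the test suite):
    # the basic set_timesteps -> step denoising loop. IPNDM keeps its own
    # history of past residuals (`ets`) and warms it up over the first steps.
    scheduler = IPNDMScheduler(num_train_timesteps=1_000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # stand-in for a denoising model's prediction
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)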
| 333 | 0 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
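    # Sanity check (sketch): under this indexing the first Fibonacci number
    # with three digits is F(12) = 144, so fibonacci_digits_index(3) == 12.
    assert fibonacci_digits_index(3) == 12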
| 31 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
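

if __name__ == "__main__":
    # Quick local sketch (added for exposition; randomly initialised weights,
    # no hub download): check the classification head's output shape.
    config = SwiftFormerConfig(num_labels=10)
    model = SwiftFormerForImageClassification(config)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(2, 3, 224, 224)).logits
    print(logits.shape)  # -> torch.Size([2, 10])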
| 333 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
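

if __name__ == "__main__":
    # Minimal sketch (added for exposition): `randn_tensor` is the seeded-noise
    # helper the mixin uses; re-seeding the generator reproduces the tensor.
    a = randn_tensor((1, 4), generator=torch.manual_seed(0))
    b = randn_tensor((1, 4), generator=torch.manual_seed(0))
    assert torch.equal(a, b)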
| 32 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
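

if __name__ == "__main__":
    # Illustrative sketch (added for exposition; downloads weights from the
    # Hugging Face Hub): the auto class resolves a checkpoint's config type to
    # the matching Flax architecture through the lazy mappings above.
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # -> FlaxBertModel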
| 333 | 0 |
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from ``version_str``."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Join a (major, minor, patch) tuple back into a dotted version string."""
    return ".".join(str(v) for v in version_tuple)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, e.g. step_rules="1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
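# Usage sketch (added for illustration; not part of the original module):
#
# >>> import torch
# >>> params = [torch.nn.Parameter(torch.zeros(1))]
# >>> optimizer = torch.optim.AdamW(params, lr=1e-3)
# >>> scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)
# >>> for _ in range(5):
# ...     optimizer.step()
# ...     scheduler.step()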
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Pipeline that answers a free-form question about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume `image` already carries both the image and the question.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at ``x0`` using Neville's method.

    Returns the interpolated value and the full table of intermediate values.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
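# Sanity check (added for illustration): the points below lie on the line
# y = x + 5, so interpolating at x0 = 5 should return 10.0 as the first item.
#
# >>> neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]
# 10.0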
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
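# Example (added for illustration): modules that need an optional dependency
# can enforce its version at call time.
#
# >>> dep_version_check("tqdm")  # raises if the installed tqdm is too old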
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover by greedily consuming a matching."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Liouville's lambda: -1 if ``number`` has an odd count of prime factors
    (with multiplicity), else 1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
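# Examples (added for illustration):
#
# >>> liouville_lambda(10)  # 10 = 2 * 5, an even count of prime factors
# 1
# >>> liouville_lambda(11)  # prime, an odd count of prime factors
# -1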
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
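# Expected output for the sample graph above (added as a note): the depth-first
# traversal visits a -> c, then b -> d, e, producing ['c', 'd', 'e', 'b', 'a'].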
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
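# Usage sketch (added for illustration; not part of the original module):
#
# >>> config = RoFormerConfig(rotary_value=True)
# >>> (config.hidden_size, config.max_position_embeddings)
# (768, 1536)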
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
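# Usage sketch (added for illustration; not part of the original module):
#
# >>> tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
# >>> tokenizer("Hello world")["input_ids"]
# [101, 7592, 2088, 102]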
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert whether the two models' gradients are (or are not) in sync."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=False):
    """Run one forward/backward step, optionally through `accelerator.backward`."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Return everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to `res`
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
"Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
"general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
"Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
"models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
"BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
"conditioning on both left and right context in all layers.",
"The quick brown fox jumps over the lazy dog.",
]
# fmt: off
        expected_encoding = {
"input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__UpperCAmelCase , )
| 40 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    row by row, left to right.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template grid based on the key, fills it with the
    characters of the input string, then reads it back in zigzag order.
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
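
# Quick round-trip demo of the cipher above (illustrative; the plaintext and
# key are arbitrary choices, not part of the original module).
if __name__ == "__main__":
    message = "WE ARE DISCOVERED FLEE AT ONCE"
    ciphertext = encrypt(message, 3)
    assert decrypt(ciphertext, 3) == message  # decrypt inverts encrypt
    assert bruteforce(ciphertext)[3] == message  # brute force recovers it at key 3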
| 333 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
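
# A brief illustration of the lazy-import pattern above (comment-only so the
# module stays side-effect free). Attribute access on the module triggers the
# real import; the dotted path below is an assumption based on the relative
# imports, i.e. that this file lives at transformers.models.gpt_neox_japanese.
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
#   config = GPTNeoXJapaneseConfig()  # configuration module is imported only now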
| 41 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embeddings are the noise-augmented image embeddings,
            # hence twice the projection dimension
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
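
# Hedged usage sketch distilled from the integration tests above (comment-only;
# the model id and memory-saving calls are the ones the tests exercise, not
# official documentation):
#
#   import torch
#   from diffusers import StableUnCLIPPipeline
#
#   pipe = StableUnCLIPPipeline.from_pretrained(
#       "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
#   )
#   pipe.enable_attention_slicing()        # lowers peak VRAM at some speed cost
#   pipe.enable_sequential_cpu_offload()   # streams weights to the GPU on demand
#   image = pipe("anime turtle", output_type="np").images[0]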
| 333 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Dict = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A_ : int = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" Bloom tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Encode each conversation turn, terminate it with EOS, and keep the tail."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
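
# Sketch of the conversation helper above (comment-only; assumes this class is
# the same as transformers' BloomTokenizerFast). Each turn is encoded and
# terminated with the EOS id, then the history is truncated from the left to
# model_max_length:
#
#   from transformers import BloomTokenizerFast
#   from transformers.pipelines.conversational import Conversation
#
#   tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   ids = tok._build_conversation_input_ids(Conversation("Hello there!"))
#   assert ids[-1] == tok.eos_token_id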
| 333 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the fused qkv matrix of each encoder layer into separate queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify the conversion on this COCO validation image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the DINO checkpoint's weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
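
# Example invocation (illustrative; the script filename and output path are
# placeholders, while the flags match the parser defined above):
#
#   python convert_dino_to_vit.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16_converted --base_model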
| 43 |
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`
    (Lagrange's four-square theorem guarantees at most four are needed).
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
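
# Worked example (illustrative): 12 = 4 + 4 + 4 and no representation of 12
# uses fewer perfect squares, so the function returns 3.
if __name__ == "__main__":
    assert minimum_squares_to_represent_a_number(12) == 3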
| 333 | 0 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """
    Construct a T5 tokenizer based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
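
# Hedged usage sketch (comment-only; assumes this class matches transformers'
# T5Tokenizer). The 100 sentinel tokens <extra_id_0> ... <extra_id_99> sit at
# the top of the vocabulary, so their ids count down from vocab_size - 1:
#
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1
#   tok("Translate English to German: hello").input_ids  # ends with the </s> id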
| 44 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A_ : Tuple = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    """
    Image processor that resizes, normalizes to [-1, 1], and color-quantizes
    images into palette ("cluster") indices.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters=None,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_normalize=True,
        do_color_quantize=True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(self, image, data_format=None) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_normalize=None,
        do_color_quantize=None,
        clusters=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
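
# Minimal sketch of the color-quantization path above (comment-only; the
# 16-entry palette is an arbitrary stand-in, since real ImageGPT checkpoints
# ship a larger learned palette):
#
#   import numpy as np
#   palette = np.random.rand(16, 3) * 2 - 1   # clusters live in [-1, 1], like normalized pixels
#   processor = ImageGPTImageProcessor(clusters=palette, size={"height": 32, "width": 32})
#   image = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   batch["input_ids"].shape  # (1, 32 * 32): one palette index per pixel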
| 333 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
"google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """
    Configuration class for MobileNetV1 models.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
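
# Example (comment-only): instantiating the config and inspecting the ONNX
# export metadata defined above; values shown follow from the code.
#
#   config = MobileNetV1Config(image_size=192, depth_multiplier=0.75)
#   onnx_config = MobileNetV1OnnxConfig(config)
#   list(onnx_config.inputs)          # ["pixel_values"]
#   onnx_config.atol_for_validation   # 1e-4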
| 45 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 333 | 0 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """
    Configuration class for the EnCodec neural audio codec.
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # properties so that they follow chunk_length_s if it is changed on the fly
    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
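
# Worked example of the derived properties with the defaults above:
# hop_length = prod([8, 5, 4, 2]) = 320, frame_rate = ceil(24000 / 320) = 75,
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.
#
#   config = EncodecConfig()
#   config.frame_rate       # 75
#   config.num_quantizers   # 32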
| 46 |
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power (W) from apparent power (VA) and power factor.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power (VAR) from apparent power (VA) and power factor.
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
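
# Worked example (illustrative): a 100 VA load at power factor 0.8 draws
# 80 W of real power and 60 VAR of reactive power, since sqrt(1 - 0.8**2) = 0.6.
if __name__ == "__main__":
    assert real_power(100, 0.8) == 80.0
    assert math.isclose(reactive_power(100, 0.8), 60.0)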
| 333 | 0 |
'''simple docstring'''
def solution() -> int:
    """
    Project Euler problem 19: count how many Sundays fell on the first of the
    month during the twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
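# Sanity note: the widely published answer to Project Euler problem 19, which
# the loop above computes, is 171.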
| 47 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000 x 1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one binary search per row, shrinking the bound each time."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every element."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives by scanning each row, stopping at the first negative."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
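
    # Illustrative cross-check (not in the original module): the binary-search
    # count agrees with brute force on every grid defined above.
    for test_grid in test_grids:
        assert count_negatives_binary_search(test_grid) == count_negatives_brute_force(test_grid)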
| 333 | 0 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """
    Feature extraction pipeline using no model head: returns the hidden states
    of the base transformer.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
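
# Hedged usage sketch (comment-only; assumes this class backs the standard
# "feature-extraction" pipeline task in transformers):
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   feats = extractor("Hello world")                         # nested lists, [1, seq_len, hidden_size]
#   tensor = extractor("Hello world", return_tensors=True)   # framework tensor instead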
| 48 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePt310:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePt310:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Check pseudo-equality of two parsers by comparing their actions."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePt310)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
                __UpperCAmelCase = parser.parse_json_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> List[Any]:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
| 333 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case :int = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : int = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : str="<unk>" , __SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , __SCREAMING_SNAKE_CASE : str=125 , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
__a = [F'<extra_id_{i}>' for i in range(__SCREAMING_SNAKE_CASE)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__a = len(set(filter(lambda __SCREAMING_SNAKE_CASE: bool('''extra_id''' in str(__SCREAMING_SNAKE_CASE)) , __SCREAMING_SNAKE_CASE)))
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'''
''' extra_ids tokens''')
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else pad_token
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else eos_token
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else unk_token
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , extra_ids=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = extra_ids
__a = 2**8 # utf is 8 bits
# define special tokens dict
__a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__a = len(self.special_tokens_encoder)
__a = len(__SCREAMING_SNAKE_CASE)
for i, token in enumerate(__SCREAMING_SNAKE_CASE):
__a = self.vocab_size + i - n
__a = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False):
'''simple docstring'''
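        # 1 marks special tokens (here only the eos appended after each sequence),
        # 0 marks regular tokens.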
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE)
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
return ([0] * len(__SCREAMING_SNAKE_CASE)) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE)) + [1]
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int]):
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
''' eos tokens being added.''')
return token_ids
else:
return token_ids + [self.eos_token_id]
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos) * [0]
return len(token_ids_a + eos + token_ids_a + eos) * [0]
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE)
if token_ids_a is None:
return token_ids_a
else:
__a = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE)
return token_ids_a + token_ids_a
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = [chr(__SCREAMING_SNAKE_CASE) for i in text.encode('''utf-8''')]
return tokens
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
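        # Special and added tokens keep their registered ids; any other single
        # character maps to its code point shifted past the special tokens, and
        # longer unregistered strings fall back to the unk id.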
if token in self.special_tokens_encoder:
__a = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__a = self.added_tokens_encoder[token]
elif len(__SCREAMING_SNAKE_CASE) != 1:
__a = self.unk_token_id
else:
__a = ord(__SCREAMING_SNAKE_CASE) + self._num_special_tokens
return token_id
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
if index in self.special_tokens_decoder:
__a = self.special_tokens_decoder[index]
else:
__a = chr(index - self._num_special_tokens)
return token
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
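        # Rebuild the raw byte string: registered special/added tokens are
        # re-encoded as UTF-8, plain byte tokens become their single raw byte.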
__a = B''''''
for token in tokens:
if token in self.special_tokens_decoder:
__a = self.special_tokens_decoder[token].encode('''utf-8''')
elif token in self.added_tokens_decoder:
__a = self.special_tokens_decoder[token].encode('''utf-8''')
elif token in self.special_tokens_encoder:
__a = token.encode('''utf-8''')
elif token in self.added_tokens_encoder:
__a = token.encode('''utf-8''')
else:
__a = bytes([ord(__SCREAMING_SNAKE_CASE)])
bstring += tok_string
__a = bstring.decode('''utf-8''' , errors='''ignore''')
return string
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
return ()
| 49 |
import doctest
from collections import deque
import numpy as np
class A_ :
'''simple docstring'''
def __init__(self ) -> None:
__UpperCAmelCase = [2, 1, 2, -1]
__UpperCAmelCase = [1, 2, 3, 4]
def lowerCAmelCase_ (self ) -> list[float]:
__UpperCAmelCase = len(self.first_signal )
__UpperCAmelCase = len(self.second_signal )
__UpperCAmelCase = max(lowercase__ , lowercase__ )
# create a zero matrix of max_length x max_length
__UpperCAmelCase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
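        # build a circulant matrix: row i holds the second signal rotated by i positions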
for i in range(lowercase__ ):
__UpperCAmelCase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__UpperCAmelCase = np.matmul(np.transpose(lowercase__ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 333 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
UpperCAmelCase__ = True
def A_ ( self : Optional[int] ) -> Dict:
super().setUp()
lowerCamelCase__ : str = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> Optional[int]:
lowerCamelCase__ : Tuple = 'こんにちは、世界。 \nこんばんは、世界。'
lowerCamelCase__ : Union[str, Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def A_ ( self : List[str] , UpperCAmelCase : List[Any] ) -> str:
lowerCamelCase__ , lowerCamelCase__ : str = self.get_input_output_texts(UpperCAmelCase )
lowerCamelCase__ : int = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase )
return text, ids
def A_ ( self : Dict ) -> List[str]:
pass # TODO add if relevant
def A_ ( self : List[Any] ) -> Dict:
pass # TODO add if relevant
def A_ ( self : Any ) -> Dict:
pass # TODO add if relevant
def A_ ( self : Any ) -> Optional[int]:
lowerCamelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file )
lowerCamelCase__ : str = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A_ ( self : Any ) -> Tuple:
lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(UpperCAmelCase )
lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。'
lowerCamelCase__ : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(UpperCAmelCase , 'wb' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , 'rb' ) as handle:
lowerCamelCase__ : Tuple = pickle.load(UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : List[Any] ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : Union[str, Any] ) -> Any:
try:
lowerCamelCase__ : Optional[int] = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : int ) -> Union[str, Any]:
try:
lowerCamelCase__ : Optional[Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : str ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def A_ ( self : int ) -> List[str]:
try:
lowerCamelCase__ : Optional[int] = MecabTokenizer(
do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
            # if the dictionary doesn't exist on the system, the code above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def A_ ( self : Dict ) -> Tuple:
lowerCamelCase__ : Any = MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def A_ ( self : List[Any] ) -> Optional[Any]:
lowerCamelCase__ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。'
lowerCamelCase__ : List[Any] = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(UpperCAmelCase , 'wb' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , 'rb' ) as handle:
lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_sudachi
def A_ ( self : Dict ) -> int:
lowerCamelCase__ : Any = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : Dict ) -> Optional[Any]:
lowerCamelCase__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def A_ ( self : Any ) -> int:
lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def A_ ( self : Any ) -> Union[str, Any]:
lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def A_ ( self : str ) -> Optional[int]:
lowerCamelCase__ : List[str] = SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase__ : int = SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def A_ ( self : List[Any] ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(UpperCAmelCase )
lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。'
lowerCamelCase__ : Any = tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(UpperCAmelCase , 'wb' ) as handle:
pickle.dump(UpperCAmelCase , UpperCAmelCase )
with open(UpperCAmelCase , 'rb' ) as handle:
lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = tokenizer_new.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@require_jumanpp
def A_ ( self : Tuple ) -> Tuple:
lowerCamelCase__ : List[str] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> Optional[Any]:
lowerCamelCase__ : List[str] = JumanppTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : Tuple ) -> List[str]:
lowerCamelCase__ : int = JumanppTokenizer(normalize_text=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def A_ ( self : str ) -> Dict:
lowerCamelCase__ : Optional[Any] = JumanppTokenizer(trim_whitespace=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def A_ ( self : Any ) -> Any:
lowerCamelCase__ : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def A_ ( self : List[Any] ) -> int:
lowerCamelCase__ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
lowerCamelCase__ : int = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase__ : List[Any] = i
lowerCamelCase__ : int = WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def A_ ( self : Tuple ) -> str:
lowerCamelCase__ : int = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
lowerCamelCase__ : List[str] = tokenizer.subword_tokenizer
lowerCamelCase__ : Any = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(UpperCAmelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
lowerCamelCase__ : Optional[int] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(UpperCAmelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def A_ ( self : Dict ) -> List[Any]:
lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
lowerCamelCase__ : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = BertJapaneseTokenizer
UpperCAmelCase__ = False
def A_ ( self : Dict ) -> Any:
super().setUp()
lowerCamelCase__ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def A_ ( self : List[Any] , **UpperCAmelCase : str ) -> List[str]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **UpperCAmelCase )
def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : str = 'こんにちは、世界。 \nこんばんは、世界。'
lowerCamelCase__ : List[str] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def A_ ( self : Optional[Any] ) -> List[Any]:
pass # TODO add if relevant
def A_ ( self : Tuple ) -> Union[str, Any]:
pass # TODO add if relevant
def A_ ( self : Optional[Any] ) -> Optional[int]:
pass # TODO add if relevant
def A_ ( self : Tuple ) -> Tuple:
lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
lowerCamelCase__ : List[str] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
UpperCAmelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A_ ( self : Dict ) -> Any:
lowerCamelCase__ : Any = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
lowerCamelCase__ : Optional[int] = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = i
lowerCamelCase__ : Optional[int] = CharacterTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def A_ ( self : Any ) -> str:
lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
lowerCamelCase__ : List[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase )
lowerCamelCase__ : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : List[str] ) -> Dict:
lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese'
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : Tuple ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
lowerCamelCase__ : Optional[Any] = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 50 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( _a ):
'''simple docstring'''
a__ = "pegasus"
a__ = ["past_key_values"]
a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(self , lowercase__=50_265 , lowercase__=1_024 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=12 , lowercase__=4_096 , lowercase__=16 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=True , lowercase__=True , lowercase__="gelu" , lowercase__=1_024 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.02 , lowercase__=0 , lowercase__=False , lowercase__=0 , lowercase__=1 , lowercase__=1 , **lowercase__ , ) -> str:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ , eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , **lowercase__ , )
@property
def lowerCAmelCase_ (self ) -> int:
return self.encoder_attention_heads
@property
def lowerCAmelCase_ (self ) -> int:
return self.d_model
| 333 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
snake_case_ : List[Any] = data_utils.TransfoXLTokenizer
snake_case_ : int = data_utils.TransfoXLCorpus
snake_case_ : List[Any] = data_utils
snake_case_ : int = data_utils
def A (__A : Dict , __A : List[Any] , __A : Union[str, Any] , __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__A , '''rb''' ) as fp:
UpperCAmelCase_ = pickle.load(__A , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
UpperCAmelCase_ = corpus.vocab.__dict__
torch.save(__A , __A )
UpperCAmelCase_ = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , __A )
UpperCAmelCase_ = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(__A , __A )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCAmelCase_ = os.path.abspath(__A )
UpperCAmelCase_ = os.path.abspath(__A )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCAmelCase_ = TransfoXLConfig()
else:
UpperCAmelCase_ = TransfoXLConfig.from_json_file(__A )
print(F"""Building PyTorch model from configuration: {config}""" )
UpperCAmelCase_ = TransfoXLLMHeadModel(__A )
UpperCAmelCase_ = load_tf_weights_in_transfo_xl(__A , __A , __A )
# Save pytorch-model
UpperCAmelCase_ = os.path.join(__A , __A )
UpperCAmelCase_ = os.path.join(__A , __A )
print(F"""Save PyTorch model to {os.path.abspath(__A )}""" )
torch.save(model.state_dict() , __A )
print(F"""Save configuration file to {os.path.abspath(__A )}""" )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
snake_case_ : int = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 51 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def lowerCAmelCase_ (self , **lowercase__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
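        # Byte-level BPE encodes a leading space as a dedicated printable symbol
        # ("\u0120"); fetch it so the checks below can detect a prefix space.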
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
| 333 | 0 |
import argparse
import hashlib  # hashlib is only used for the correctness check in the test below
import struct
class A__ :
def __init__( self , A_ ):
'''simple docstring'''
UpperCamelCase : Any = data
UpperCamelCase : Union[str, Any] = [0x6_7_4_5_2_3_0_1, 0xE_F_C_D_A_B_8_9, 0x9_8_B_A_D_C_F_E, 0x1_0_3_2_5_4_7_6, 0xC_3_D_2_E_1_F_0]
@staticmethod
def __UpperCamelCase( A_ , A_ ):
'''simple docstring'''
return ((n << b) | (n >> (32 - b))) & 0xF_F_F_F_F_F_F_F
def __UpperCamelCase( self ):
'''simple docstring'''
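        # SHA-1 padding: append the 0x80 marker byte, zero-fill so the length is
        # congruent to 56 mod 64, then append the original bit length as a
        # big-endian 64-bit integer.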
UpperCamelCase : int = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
UpperCamelCase : List[str] = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def __UpperCamelCase( self ):
'''simple docstring'''
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
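        # Message schedule: extend the 16 block words to 80, each new word being
        # a one-bit left rotation of the XOR of four earlier words
        # (w[i-3], w[i-8], w[i-14], w[i-16]).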
UpperCamelCase : Tuple = list(struct.unpack(">16L" , A_ ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCamelCase : Optional[int] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = self.padding()
UpperCamelCase : List[str] = self.split_blocks()
for block in self.blocks:
UpperCamelCase : Tuple = self.expand_block(A_ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = self.h
for i in range(0 , 80 ):
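                # four rounds of 20 steps, each with its own boolean function f
                # and round constant k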
if 0 <= i < 20:
UpperCamelCase : Any = (b & c) | ((~b) & d)
UpperCamelCase : List[str] = 0x5_A_8_2_7_9_9_9
elif 20 <= i < 40:
UpperCamelCase : Tuple = b ^ c ^ d
UpperCamelCase : Optional[int] = 0x6_E_D_9_E_B_A_1
elif 40 <= i < 60:
UpperCamelCase : Optional[int] = (b & c) | (b & d) | (c & d)
UpperCamelCase : Optional[Any] = 0x8_F_1_B_B_C_D_C
elif 60 <= i < 80:
UpperCamelCase : List[str] = b ^ c ^ d
UpperCamelCase : List[Any] = 0xC_A_6_2_C_1_D_6
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Union[str, Any] = (
self.rotate(A_ , 5 ) + f + e + k + expanded_block[i] & 0xF_F_F_F_F_F_F_F,
a,
self.rotate(A_ , 30 ),
c,
d,
)
UpperCamelCase : Tuple = (
self.h[0] + a & 0xF_F_F_F_F_F_F_F,
self.h[1] + b & 0xF_F_F_F_F_F_F_F,
self.h[2] + c & 0xF_F_F_F_F_F_F_F,
self.h[3] + d & 0xF_F_F_F_F_F_F_F,
self.h[4] + e & 0xF_F_F_F_F_F_F_F,
)
return ("{:08x}" * 5).format(*self.h )
def A_ ( ) -> Any:
UpperCamelCase : List[Any] = b"Test String"
assert SHAaHash(_lowerCAmelCase ).final_hash() == hashlib.shaa(_lowerCAmelCase ).hexdigest() # noqa: S324
def A_ ( ) -> Any:
UpperCamelCase : Tuple = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
UpperCamelCase : Tuple = parser.parse_args()
UpperCamelCase : Union[str, Any] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
UpperCamelCase : str = f.read()
else:
UpperCamelCase : int = bytes(_lowerCAmelCase , "utf-8" )
print(SHAaHash(_lowerCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 52 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( _a ):
'''simple docstring'''
a__ = (IPNDMScheduler,)
a__ = (("num_inference_steps", 50),)
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
__UpperCAmelCase = {'''num_train_timesteps''': 1_000}
config.update(**lowercase__ )
return config
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Any:
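        # save/reload round-trip: a scheduler restored via from_pretrained must
        # produce (near-)identical step outputs given the same residual history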
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self , lowercase__=0 , **lowercase__ ) -> Optional[int]:
__UpperCAmelCase = dict(self.forward_default_kwargs )
__UpperCAmelCase = kwargs.pop('''num_inference_steps''' , lowercase__ )
__UpperCAmelCase = self.dummy_sample
__UpperCAmelCase = 0.1 * sample
__UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase = self.get_scheduler_config()
__UpperCAmelCase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
if time_step is None:
__UpperCAmelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__UpperCAmelCase = scheduler_class.from_pretrained(lowercase__ )
                new_scheduler.set_timesteps(lowercase__ )
                # copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase = dummy_past_residuals[:]
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
__UpperCAmelCase = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase_ (self , **lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.scheduler_classes[0]
__UpperCAmelCase = self.get_scheduler_config(**lowercase__ )
__UpperCAmelCase = scheduler_class(**lowercase__ )
__UpperCAmelCase = 10
__UpperCAmelCase = self.dummy_model()
__UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
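        # the timestep loop is intentionally run a second time; the expected
        # mean asserted by the calling test was produced with two passes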
for i, t in enumerate(scheduler.timesteps ):
__UpperCAmelCase = model(lowercase__ , lowercase__ )
__UpperCAmelCase = scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_with_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2_540_529) < 10
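    # NOTE (hedged): 2_540_529 is the reference mean absolute value of the final
    # sample for this deterministic run; the tolerance of 10 absorbs
    # platform-level numeric noise.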
| 333 | 0 |
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    # A DFS root is an articulation point only if it has more than one
    # outgoing edge in the DFS tree; this overrides the conditions above.
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
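# For the adjacency list above the articulation points are 2, 3 and 5, so the
# call should print those three vertices (one per line).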
| 53 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
'''simple docstring'''
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=0.1 , lowercase__=0.1 , lowercase__=224 , lowercase__=1_000 , lowercase__=[3, 3, 6, 4] , lowercase__=[48, 56, 112, 220] , ) -> int:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = num_labels
__UpperCAmelCase = image_size
__UpperCAmelCase = layer_depths
__UpperCAmelCase = embed_dims
def lowerCAmelCase_ (self ) -> str:
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ (self ) -> Optional[Any]:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase__ , layer_scale_init_value=1E-5 , )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> int:
__UpperCAmelCase = SwiftFormerModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCAmelCase_ (self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = model(lowercase__ , labels=lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
__UpperCAmelCase = SwiftFormerForImageClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ (self ) -> Optional[int]:
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = self.prepare_config_and_inputs()
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def lowerCAmelCase_ (self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def lowerCAmelCase_ (self ) -> List[Any]:
pass
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(lowercase__ )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def lowerCAmelCase_ (self ) -> Optional[int]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@slow
def lowerCAmelCase_ (self ) -> Any:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = SwiftFormerModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def lowerCAmelCase_ (self ) -> List[str]:
pass
def lowerCAmelCase_ (self ) -> Union[str, Any]:
def check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ ):
__UpperCAmelCase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = 8
self.assertEqual(len(lowercase__ ) , lowercase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
def _config_zero_init(lowercase__ ):
__UpperCAmelCase = copy.deepcopy(lowercase__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase__ , lowercase__ , 1E-10 )
if isinstance(getattr(lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ):
__UpperCAmelCase = _config_zero_init(getattr(lowercase__ , lowercase__ ) )
setattr(lowercase__ , lowercase__ , lowercase__ )
return configs_no_init
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = _config_zero_init(lowercase__ )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(config=lowercase__ )
for name, param in model.named_parameters():
if param.requires_grad:
                self.assertIn(
                    ((param.data.mean() * 1e9).round() / 1e9).item(),
                    [0.0, 1.0],
                    msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase_ (self ) -> Optional[Any]:
pass
def prepare_img():
'''simple docstring'''
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('MBZUAI/swiftformer-xs') if is_vision_available() else None
@slow
def lowerCAmelCase_ (self ) -> Tuple:
__UpperCAmelCase = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(lowercase__ )
__UpperCAmelCase = self.default_image_processor
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=lowercase__ , return_tensors='''pt''' ).to(lowercase__ )
# forward pass
with torch.no_grad():
__UpperCAmelCase = model(**lowercase__ )
# verify the logits
__UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase__ )
__UpperCAmelCase = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(lowercase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1E-4 ) )
| 333 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[Any]=3_2 , UpperCAmelCase__ : Dict=1_6 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=3_2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=[0, 1, 2, 3] , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : str=3_7 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Dict=0.1 , UpperCAmelCase__ : List[Any]=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : List[Any]=[1, 3_8_4, 2_4, 2_4] , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=None , ) -> List[Any]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = backbone_out_indices
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = backbone_featmap_shape
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : str ) -> Any:
__SCREAMING_SNAKE_CASE = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCAmelCase__ , backbone_featmap_shape=self.backbone_featmap_shape , )
def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = DPTModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
pass
def UpperCAmelCase_ ( self : Any ) -> Dict:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase__ , nn.Linear ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
if model_class in get_values(UpperCAmelCase__ ):
continue
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).loss
loss.backward()
def UpperCAmelCase_ ( self : Dict ) -> str:
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
if model_class in get_values(UpperCAmelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
__SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.gradient_checkpointing_enable()
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ , return_labels=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).loss
loss.backward()
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = _config_zero_init(UpperCAmelCase__ )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(config=UpperCAmelCase__ )
# Skip the check for the backbone
__SCREAMING_SNAKE_CASE = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__SCREAMING_SNAKE_CASE = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> str:
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__SCREAMING_SNAKE_CASE = DPTModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = "add"
with self.assertRaises(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation(UpperCAmelCase__ )
def prepare_img():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> str:
__SCREAMING_SNAKE_CASE = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__SCREAMING_SNAKE_CASE = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" ).to(UpperCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = outputs.predicted_depth
# verify the predicted depth
__SCREAMING_SNAKE_CASE = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(UpperCAmelCase__ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , UpperCAmelCase__ , atol=1E-4 ) )
| 54 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
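# Usage sketch (hedged; requires jax/flax and a Flax checkpoint to be available):
#
#     from transformers import FlaxAutoModel
#     model = FlaxAutoModel.from_pretrained('bert-base-cased')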
| 333 | 0 |
'''simple docstring'''
def __snake_case ( UpperCAmelCase_ : int ):
lowerCamelCase_ = [[0 for _ in range(UpperCAmelCase_ )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCamelCase_ = 1
for n in range(m + 1 ):
for k in range(1 , UpperCAmelCase_ ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
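# Hedged sanity check: partition(5) should return 7, matching the seven integer
# partitions of 5 (5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1).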
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
a_ : Optional[int] = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
a_ : str = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 55 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup phase."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant learning rate, parsed from rules like "1:10,0.1:20,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with several hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr to lr_end."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that dispatches to the schedule named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
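# Usage sketch (hedged; any torch optimizer works, the parameter below is illustrative):
#
#     import torch
#     params = [torch.nn.Parameter(torch.zeros(1))]
#     optimizer = torch.optim.AdamW(params, lr=1e-4)
#     lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=100, num_training_steps=1_000)
#     optimizer.step(); lr_scheduler.step()  # called once per training step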
| 333 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
'''simple docstring'''
assert isinstance(__UpperCAmelCase, __UpperCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True] )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> int:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = SqlDatasetReader(
'''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase, keep_in_memory=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''', [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
], )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
snake_case_ = features.copy() if features else default_expected_features
snake_case_ = (
Features({feature: Value(__UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=__UpperCAmelCase, cache_dir=__UpperCAmelCase ).read()
_check_sql_dataset(__UpperCAmelCase, __UpperCAmelCase )
def iter_sql_file(sqlite_path):
    """Yield the rows of the `dataset` table in the given SQLite file."""
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('SELECT * FROM dataset')
        for row in cur:
            yield row
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2 ).write()
snake_case_ = iter_sql_file(__UpperCAmelCase )
snake_case_ = iter_sql_file(__UpperCAmelCase )
for rowa, rowa in zip(__UpperCAmelCase, __UpperCAmelCase ):
assert rowa == rowa
@require_sqlalchemy
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
snake_case_ = tmp_path / '''cache'''
snake_case_ = os.path.join(__UpperCAmelCase, '''tmp.sql''' )
snake_case_ = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=__UpperCAmelCase ).read()
with pytest.raises(__UpperCAmelCase ):
SqlDatasetWriter(__UpperCAmelCase, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0 ).write()
| 56 |
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Evaluate the interpolating polynomial at x0 with Neville's iterated
    interpolation; returns [interpolated value, full Neville table].
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
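# Hedged example: the points below lie on the line y = x + 5, so evaluating the
# interpolating polynomial at x0 = 5 should give 10.0:
#     neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # -> 10.0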
if __name__ == "__main__":
import doctest
doctest.testmod()
| 333 | 0 |
"""simple docstring"""
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig(object):
    """
    Configuration wrapper: copies the underlying text-model config and adds the
    modal (e.g. image) encoder hidden size.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
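# Usage sketch (hedged; assumes any text-model config such as BertConfig is available):
#
#     from transformers import BertConfig
#     config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)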
| 57 |
def matching_min_vertex_cover(graph: dict) -> set:
    """
    APX algorithm for the minimum vertex cover problem, based on a matching:
    repeatedly pick an uncovered edge and add both of its endpoints.
    """
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of couples that represents all of the edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Compute the built-in voltage of a p-n junction from the carrier concentrations."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
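# Hedged sanity check: for N_d = N_a = 1e17 and n_i = 1e10 (per cm^3) at T = 300 K,
# builtin_voltage(1e17, 1e17, 1e10) evaluates to roughly 0.833 V.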
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 |
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start, visited, sort):
    """Perform a DFS-based topological sort of the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
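# Expected output for the graph above: ['c', 'd', 'e', 'b', 'a']
# (each vertex is appended only after all of its children in this DFS variant).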
| 333 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
snake_case : int = {}
if train_file is not None:
snake_case : List[Any] = [train_file]
if eval_file is not None:
snake_case : Optional[int] = [eval_file]
if test_file is not None:
snake_case : Any = [test_file]
snake_case : int = datasets.load_dataset("csv" , data_files=__lowerCamelCase )
snake_case : str = list(ds[list(files.keys() )[0]].features.keys() )
snake_case : int = features_name.pop(__lowerCamelCase )
snake_case : str = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case : str = {label: i for i, label in enumerate(__lowerCamelCase )}
snake_case : List[Any] = tokenizer.model_input_names
snake_case : List[Any] = {}
if len(__lowerCamelCase ) == 1:
for k in files.keys():
snake_case : Tuple = ds[k].map(
lambda __lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) , batched=__lowerCamelCase , )
elif len(__lowerCamelCase ) == 2:
for k in files.keys():
snake_case : List[Any] = ds[k].map(
lambda __lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) , batched=__lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case : Dict = {k: v for k, v in ex.items() if k in input_names}
snake_case : Union[str, Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case : str = {k: v for k, v in ex.items() if k in input_names}
snake_case : Any = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case : str = {k: v for k, v in ex.items() if k in input_names}
snake_case : List[str] = labelaid[ex[label_name]]
yield (d, label)
snake_case : int = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case : Optional[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case : Tuple = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case : Optional[int] = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase :
A__ : int = field(metadata={"help": "Which column contains the label"} )
A__ : str = field(default=A_ ,metadata={"help": "The path of the training file"} )
A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the development file"} )
A__ : Optional[str] = field(default=A_ ,metadata={"help": "The path of the test file"} )
A__ : int = field(
default=1_28 ,metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} ,)
A__ : bool = field(
default=A_ ,metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
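
A minimal sketch of how this fine-tuning script might be launched; the script filename, data files, and hyperparameters below are illustrative assumptions, not values taken from the source:

python run_tf_text_classification.py \
  --train_file train.csv \
  --dev_file dev.csv \
  --label_column_id 0 \
  --model_name_or_path bert-base-uncased \
  --output_dir ./tf_text_clf \
  --do_train --do_eval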
| 59 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names it defines; extended below when torch is available.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 | 0 |
"""simple docstring"""
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _snake_case ( _snake_case : int , _snake_case : int ):
assert isinstance(_snake_case , _snake_case )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def _snake_case ( _snake_case : List[Any] , _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[Any] ):
lowerCAmelCase : Optional[Any] = tmp_path / '''cache'''
lowerCAmelCase : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : int = SqlDatasetReader(
'''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_snake_case , keep_in_memory=_snake_case ).read()
_check_sql_dataset(_snake_case , _snake_case )
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : Dict ):
lowerCAmelCase : Optional[int] = tmp_path / '''cache'''
lowerCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
lowerCAmelCase : str = features.copy() if features else default_expected_features
lowerCAmelCase : Union[str, Any] = (
Features({feature: Value(_snake_case ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : List[Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , features=_snake_case , cache_dir=_snake_case ).read()
_check_sql_dataset(_snake_case , _snake_case )
def _snake_case ( _snake_case : Union[str, Any] ):
with contextlib.closing(sqlitea.connect(_snake_case ) ) as con:
lowerCAmelCase : List[str] = con.cursor()
cur.execute('''SELECT * FROM dataset''' )
for row in cur:
yield row
@require_sqlalchemy
def _snake_case ( _snake_case : List[str] , _snake_case : Tuple , _snake_case : Optional[int] ):
lowerCAmelCase : Any = tmp_path / '''cache'''
lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''tmp.sql''' )
lowerCAmelCase : Any = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_snake_case ).read()
SqlDatasetWriter(_snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=1 ).write()
lowerCAmelCase : Optional[Any] = iter_sql_file(_snake_case )
lowerCAmelCase : Dict = iter_sql_file(_snake_case )
for rowa, rowa in zip(_snake_case , _snake_case ):
assert rowa == rowa
@require_sqlalchemy
def _snake_case ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : int ):
lowerCAmelCase : Union[str, Any] = tmp_path / '''cache'''
lowerCAmelCase : Dict = os.path.join(_snake_case , '''tmp.sql''' )
lowerCAmelCase : Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_snake_case ).read()
SqlDatasetWriter(_snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=2 ).write()
lowerCAmelCase : Any = iter_sql_file(_snake_case )
lowerCAmelCase : str = iter_sql_file(_snake_case )
for rowa, rowa in zip(_snake_case , _snake_case ):
assert rowa == rowa
@require_sqlalchemy
def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : str ):
lowerCAmelCase : Union[str, Any] = tmp_path / '''cache'''
lowerCAmelCase : List[Any] = os.path.join(_snake_case , '''tmp.sql''' )
lowerCAmelCase : Union[str, Any] = SqlDatasetReader('''dataset''' , '''sqlite:///''' + sqlite_path , cache_dir=_snake_case ).read()
with pytest.raises(_snake_case ):
SqlDatasetWriter(_snake_case , '''dataset''' , '''sqlite:///''' + output_sqlite_path , num_proc=0 ).write()
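
A minimal round-trip sketch using the reader/writer under test; the database path and running outside the pytest fixtures are assumptions:

from datasets import Dataset
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
SqlDatasetWriter(ds, "dataset", "sqlite:///example.db", num_proc=1).write()  # writes table "dataset"
reloaded = SqlDatasetReader("dataset", "sqlite:///example.db").read()
assert reloaded.column_names == ds.column_names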
| 60 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
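
A minimal sketch of the training pattern these tests exercise; the hyperparameters are illustrative assumptions, not values taken from the tests:

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    inputs, targets = batch.values()
    with accelerator.accumulate(model):
        # gradients are synchronized and applied only every `gradient_accumulation_steps` batches
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()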
| 333 | 0 |