"""simple docstring"""
from __future__ import annotations
def __a ( A ):
'''simple docstring'''
lowercase__ = str(A )
return len(A ) == 9 and set(A ) == set("123456789" )
def __a ( ):
'''simple docstring'''
for base_num in range(99_99 , 49_99 , -1 ):
lowercase__ = 10_00_02 * base_num
if is_9_pandigital(A ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
lowercase__ = 1_00_20_03 * base_num
if is_9_pandigital(A ):
return candidate
return None
if __name__ == "__main__":
print(F'{solution() = }')
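
# --- Added: a brute-force cross-check of `solution()` above. This is an
# illustrative sketch (not part of the original file): it enumerates all
# concatenated products n * (1, 2, ..., k) with k >= 2 directly and keeps the
# largest 9-pandigital result, which should match the targeted search above.
def brute_force_solution() -> int:
    best = 0
    for n in range(1, 10_000):
        concat = ""
        multiplier = 1
        while len(concat) < 9:
            concat += str(n * multiplier)
            multiplier += 1
        # multiplier > 2 guarantees at least the products with (1, 2) were used
        if multiplier > 2 and is_9_pandigital(int(concat)):
            best = max(best, int(concat))
    return best


if __name__ == "__main__":
    assert solution() == brute_force_solution()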
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
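
# --- Added usage sketch (illustrative; assumes the standard `transformers`
# package layout this module belongs to):
#
#     from transformers import Data2VecVisionConfig, Data2VecVisionModel
#
#     config = Data2VecVisionConfig(image_size=384)   # override any default
#     model = Data2VecVisionModel(config)             # randomly initialized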
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = None
snake_case_ = BloomTokenizerFast
snake_case_ = BloomTokenizerFast
snake_case_ = True
snake_case_ = False
snake_case_ = "tokenizer_file"
snake_case_ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_rust_tokenizer()
lowercase__ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowercase__ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
lowercase__ = tokenizer.batch_encode_plus(_UpperCAmelCase )["input_ids"]
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase=6 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowercase__ = "This is a simple input"
lowercase__ = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ = ("This is a simple input", "This is a pair")
lowercase__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCAmelCase, max_length=_UpperCAmelCase )
tokenizer_r.encode_plus(_UpperCAmelCase, max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase, max_length=_UpperCAmelCase )
tokenizer_r.encode(_UpperCAmelCase, max_length=_UpperCAmelCase )
tokenizer_r.batch_encode_plus(_UpperCAmelCase, max_length=_UpperCAmelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowercase__ = None # Hotfixing padding = None
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Simple input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(_UpperCAmelCase, tokenizer_r.encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length" )
# Pair input
self.assertRaises(
_UpperCAmelCase, tokenizer_r.batch_encode_plus, _UpperCAmelCase, max_length=_UpperCAmelCase, padding="max_length", )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_rust_tokenizer()
lowercase__ = load_dataset("xnli", "all_languages", split="test", streaming=_UpperCAmelCase )
lowercase__ = next(iter(_UpperCAmelCase ) )["premise"] # pick up one data
lowercase__ = list(sample_data.values() )
lowercase__ = list(map(tokenizer.encode, _UpperCAmelCase ) )
lowercase__ = [tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase ) for x in output_tokens]
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ), 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ), 1 )
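
# --- Added note: with the class above saved in the usual transformers test
# tree, these tests would be run with something like (the path is an
# assumption):
#
#     python -m pytest tests/models/bloom/test_tokenization_bloom.py -k padding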
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
"""simple docstring"""
import copy
import re
class a__ :
snake_case_ = "hp"
snake_case_ = {}
snake_case_ = None
@classmethod
def snake_case__ ( cls, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = prefix
lowercase__ = defaults
cls.build_naming_info()
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return ""
lowercase__ = None
if any(char.isdigit() for char in word ):
raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(_UpperCAmelCase ) + 1 ):
lowercase__ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
lowercase__ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_UpperCAmelCase ):
lowercase__ = ""
while integer != 0:
lowercase__ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
lowercase__ = 0
while True:
lowercase__ = word + "#" + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
lowercase__ = sword
break
lowercase__ = short_word
lowercase__ = word
return short_word
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = param_name.split("_" )
lowercase__ = [TrialShortNamer.shortname_for_word(_UpperCAmelCase, _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
lowercase__ = ["", "_"]
for separator in separators:
lowercase__ = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
lowercase__ = shortname
lowercase__ = param_name
return shortname
return param_name
@staticmethod
def snake_case__ ( _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = TrialShortNamer.shortname_for_key(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = short_name
lowercase__ = param_name
@classmethod
def snake_case__ ( cls ):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
lowercase__ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
lowercase__ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = info
@classmethod
def snake_case__ ( cls, _UpperCAmelCase ):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
lowercase__ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
lowercase__ = cls.NAMING_INFO["short_param"][k]
if isinstance(_UpperCAmelCase, _UpperCAmelCase ):
lowercase__ = 1 if v else 0
lowercase__ = "" if isinstance(_UpperCAmelCase, (int, float) ) else "-"
lowercase__ = F'''{key}{sep}{v}'''
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def snake_case__ ( cls, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
lowercase__ = []
else:
lowercase__ = repr.split("_" )
lowercase__ = {}
for value in values:
if "-" in value:
lowercase__ , lowercase__ = value.split("-" )
else:
lowercase__ = re.sub("[0-9.]", "", _UpperCAmelCase )
lowercase__ = float(re.sub("[^0-9.]", "", _UpperCAmelCase ) )
lowercase__ = cls.NAMING_INFO["reverse_short_param"][p_k]
lowercase__ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
lowercase__ = cls.DEFAULTS[k]
return parameters
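
# --- Added: a minimal, self-contained demonstration of TrialShortNamer (an
# illustrative sketch; the subclass and hyperparameter names are hypothetical).
class _DemoNamer(TrialShortNamer):
    PREFIX = "run"
    DEFAULTS = {"learning_rate": 1e-3, "batch_size": 32}


# Only non-default values appear in the name; "batch_size" shortens to "bs":
#   _DemoNamer.shortname({"learning_rate": 1e-3, "batch_size": 64})  -> "run_bs64"
# parse_repr() restores the full parameter dict (numbers come back as floats):
#   _DemoNamer.parse_repr("run_bs64")  -> {"batch_size": 64.0, "learning_rate": 0.001}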
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
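
# --- Added: sketch of how such a hand-tuned schedule is consumed (illustrative;
# passing an explicit `timesteps` list to `set_timesteps` requires a recent
# diffusers version that supports custom timestep lists):
#
#     from diffusers import DDPMScheduler
#
#     scheduler = DDPMScheduler()
#     scheduler.set_timesteps(timesteps=fast27_timesteps, device="cpu")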
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {"vocab_file": "spiece.model"}
lowerCAmelCase_: Dict = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase="<s>", _UpperCAmelCase="</s>", _UpperCAmelCase="<unk>", _UpperCAmelCase="<sep>", _UpperCAmelCase="<pad>", _UpperCAmelCase="<cls>", _UpperCAmelCase="<mask>", _UpperCAmelCase=["<eop>", "<eod>"], _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCAmelCase, remove_space=_UpperCAmelCase, keep_accents=_UpperCAmelCase, bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
lowercase__ = 3
lowercase__ = do_lower_case
lowercase__ = remove_space
lowercase__ = keep_accents
lowercase__ = vocab_file
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
lowercase__ = jieba
lowercase__ = str.maketrans(" \n", "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def snake_case__ ( self ):
'''simple docstring'''
return len(self.sp_model )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if self.remove_space:
lowercase__ = " ".join(inputs.strip().split() )
else:
lowercase__ = inputs
lowercase__ = outputs.replace("``", "\"" ).replace("''", "\"" )
if not self.keep_accents:
lowercase__ = unicodedata.normalize("NFKD", _UpperCAmelCase )
lowercase__ = "".join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] )
if self.do_lower_case:
lowercase__ = outputs.lower()
return outputs
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.preprocess_text(_UpperCAmelCase )
lowercase__ = self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
lowercase__ = []
for piece in pieces:
if len(_UpperCAmelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowercase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase, "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase__ = cur_pieces[1:]
else:
lowercase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCAmelCase )
else:
new_pieces.append(_UpperCAmelCase )
return new_pieces
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.PieceToId(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return self.sp_model.IdToPiece(_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1]
return ([0] * len(_UpperCAmelCase )) + [1, 1]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = super()._decode(*_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = text.replace(" ", "" ).replace("\u2582", " " ).replace("\u2583", "\n" )
return text
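
# --- Added usage sketch (illustrative; requires `sentencepiece` and `jieba`
# to be installed):
#
#     from transformers import CpmTokenizer
#
#     tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#     ids = tokenizer.encode("你好,世界")
#     text = tokenizer.decode(ids)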
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def __a ( A , A , A ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
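
# --- Added worked examples (illustrative): pass the unknown quantity as 0.
#
#     >>> ohms_law(voltage=9, current=0, resistance=3)   # 9 V across 3 ohm
#     {'current': 3.0}
#     >>> ohms_law(voltage=0, current=2, resistance=5)   # 2 A through 5 ohm
#     {'voltage': 10.0}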
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
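
# --- Added: expected output of the demo above (hand-checked against the
# algorithm: round robin with slice 17, round robin with slice 25, then FCFS):
#
#   waiting time:                    [83, 17, 94, 101]
#   completion time:                 [136, 34, 162, 125]
#   turnaround time:                 [136, 34, 162, 125]
#   sequence of finished processes:  ['P2', 'P4', 'P1', 'P3']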
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
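
# --- Added: an equivalent popcount-based check, shown for contrast
# (illustrative; note it treats 0 as *not* a power of two, unlike the
# convention above):
#
#     def is_power_of_two_popcount(number: int) -> bool:
#         return number > 0 and bin(number).count("1") == 1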
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
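
# --- Added note: in the usual transformers test tree this file would be run
# with (the path is an assumption):
#
#     python -m pytest tests/models/byt5/test_tokenization_byt5.py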
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(A )
if number < 1:
lowercase__ = f'''Input value of [number={number}] must be > 0'''
raise ValueError(A )
lowercase__ = 1
for i in range(1 , A ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
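
# --- Added: cross-check against the closed form C(n) = comb(2n, n) / (n + 1)
# (illustrative sketch; `catalan()` above is 1-indexed, so `catalan(k)` equals
# the (k - 1)-th Catalan number in the usual 0-indexed convention).
import math


def catalan_closed_form(number: int) -> int:
    n = number - 1
    return math.comb(2 * n, n) // (n + 1)


# all(catalan(k) == catalan_closed_form(k) for k in range(1, 20))  -> True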
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class a__ ( _a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class a__ ( unittest.TestCase ):
@property
def snake_case__ ( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ort.SessionOptions()
lowercase__ = False
return options
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = "A red cat sitting on a park bench"
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=_UpperCAmelCase, image=_UpperCAmelCase, mask_image=_UpperCAmelCase, guidance_scale=7.5, num_inference_steps=10, generator=_UpperCAmelCase, output_type="np", )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.2_514, 0.3_007, 0.3_517, 0.1_790, 0.2_382, 0.3_167, 0.1_944, 0.2_273, 0.2_464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
lowercase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
lowercase__ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" )
lowercase__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=_UpperCAmelCase, safety_checker=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__ = "A red cat sitting on a park bench"
lowercase__ = np.random.RandomState(0 )
lowercase__ = pipe(
prompt=_UpperCAmelCase, image=_UpperCAmelCase, mask_image=_UpperCAmelCase, guidance_scale=7.5, num_inference_steps=20, generator=_UpperCAmelCase, output_type="np", )
lowercase__ = output.images
lowercase__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.0_086, 0.0_077, 0.0_083, 0.0_093, 0.0_107, 0.0_139, 0.0_094, 0.0_097, 0.0_125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
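
# --- Added sanity check (illustrative): the first six primes are
# 2, 3, 5, 7, 11, 13, so:
#
#     >>> solution(6)
#     13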
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCAmelCase_: int = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
lowerCAmelCase_: Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __a ( A , A , A , A , A ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase__ = getattr(A , A )
if weight_type is not None:
lowercase__ = getattr(A , A ).shape
else:
lowercase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(A )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , A )
if "weight_g" in name:
lowercase__ = "weight_g"
elif "weight_v" in name:
lowercase__ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
lowercase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = "weight"
else:
lowercase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def __a ( A , A , A=None ):
'''simple docstring'''
lowercase__ = torch.load(A )
lowercase__ = WavLMConfigOrig(checkpoint["cfg"] )
lowercase__ = WavLMOrig(A )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowercase__ = WavLMConfig.from_pretrained(A )
else:
lowercase__ = WavLMConfig()
lowercase__ = WavLMModel(A )
recursively_load_weights(A , A )
hf_wavlm.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
lowerCAmelCase_: Dict = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
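# A minimal sketch of a typical shell invocation for this converter; the
# script name and all three paths below are illustrative placeholders, not
# artifacts shipped with this file:
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./wavlm_base_plus.pt \
#       --pytorch_dump_folder_path ./wavlm-base-plus-hf \
#       --config_path ./config.json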
| 668 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
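# A minimal usage sketch for the reader above, written against the obfuscated
# names exactly as they appear in this file; "data.txt" is a placeholder path:
#   reader = a__("data.txt", split=NamedSplit("train"))
#   dataset = reader.snake_case__()  # streams or builds, depending on self.streaming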
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class a__ :
def __init__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = data
lowercase__ = None
class a__ :
def __init__( self ):
'''simple docstring'''
lowercase__ = None
lowercase__ = None
def __iter__( self ):
'''simple docstring'''
lowercase__ = self.head
while self.head:
yield node.data
lowercase__ = node.next
if node == self.head:
break
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join(str(_UpperCAmelCase ) for item in iter(self ) )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
self.insert_nth(len(self ), _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
self.insert_nth(0, _UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
lowercase__ = Node(_UpperCAmelCase )
if self.head is None:
lowercase__ = new_node # first node points to itself
lowercase__ = lowercase__ = new_node
elif index == 0: # insert at head
lowercase__ = self.head
lowercase__ = lowercase__ = new_node
else:
lowercase__ = self.head
for _ in range(index - 1 ):
lowercase__ = temp.next
lowercase__ = temp.next
lowercase__ = new_node
if index == len(self ) - 1: # insert at tail
lowercase__ = new_node
def snake_case__ ( self ):
'''simple docstring'''
return self.delete_nth(0 )
def snake_case__ ( self ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case__ ( self, _UpperCAmelCase = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
lowercase__ = self.head
if self.head == self.tail: # just one node
lowercase__ = lowercase__ = None
elif index == 0: # delete head node
lowercase__ = self.tail.next.next
lowercase__ = self.head.next
else:
lowercase__ = self.head
for _ in range(index - 1 ):
lowercase__ = temp.next
lowercase__ = temp.next
lowercase__ = temp.next.next
if index == len(self ) - 1: # delete at tail
lowercase__ = temp
return delete_node.data
def snake_case__ ( self ):
'''simple docstring'''
return len(self ) == 0
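# Note on costs: every operation above goes through len(self), which walks the
# entire ring, so inserts and deletes are O(n) even at the head; caching a
# size counter would make the head operations O(1).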
def __a ( ):
'''simple docstring'''
lowercase__ = CircularLinkedList()
assert len(A ) == 0
assert circular_linked_list.is_empty() is True
assert str(A ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(A ) == i
circular_linked_list.insert_nth(A , i + 1 )
assert str(A ) == "->".join(str(A ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(A ) == "->".join(str(A ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(A ) == "->".join(str(A ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(A ) == "->".join(str(A ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(A ) == "->".join(str(A ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
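# Note: the step count above counts optimizer updates, not micro-batches; each
# update consumes gradient_accumulation_steps batches from the dataloader.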
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
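# A sketch of one way to launch this script; the DeepSpeed config file name is
# a placeholder that must match your own `accelerate config` setup:
#   accelerate launch --config_file ds_zero2.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir ./out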
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_: Dict = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
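# _import_structure maps each submodule to its public names; the _LazyModule
# bound at the bottom of this file uses it to defer the real imports until the
# first attribute access, keeping `import transformers` cheap.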
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase_: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 1 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Mock()
lowercase__ = conn, Mock()
lowercase__ = iter([1, None] )
lowercase__ = lambda A : next(A )
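# The iter/lambda pair above stands in for the mocked file.read side effect:
# it yields one chunk (1) and then None, so send_file's read-and-send loop
# runs exactly once before the sockets are closed.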
# ===== invoke =====
send_file(filename="mytext.txt" , testing=A )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 668 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase_: List[str] = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
lowerCAmelCase_: Tuple = dataset.iloc[:, 1:2].values
lowerCAmelCase_: str = dataset.iloc[:, 2].values
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase_: Any = PolynomialFeatures(degree=4)
lowerCAmelCase_: Tuple = poly_reg.fit_transform(X)
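# degree=4 expands each position level x into [1, x, x**2, x**3, x**4], which
# is what lets the plain LinearRegression below capture the curved salary data.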
lowerCAmelCase_: str = LinearRegression()
pol_reg.fit(X_poly, y)
def __a ( ):
'''simple docstring'''
plt.scatter(A , A , color="red" )
plt.plot(A , pol_reg.predict(poly_reg.fit_transform(A ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 668 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCAmelCase_: Any = logging.get_logger(__name__)
lowerCAmelCase_: int = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def __a ( A , A , A , A ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
lowercase__ = TOKENIZER_CLASSES
else:
lowercase__ = {tokenizer_name: getattr(A , tokenizer_name + "Fast" )}
logger.info(f'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
lowercase__ = TOKENIZER_CLASSES[tokenizer_name]
lowercase__ = True
if checkpoint_name is None:
lowercase__ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowercase__ = [checkpoint_name]
logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
lowercase__ = tokenizer_class.from_pretrained(A , force_download=A )
# Save fast tokenizer
logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
lowercase__ , lowercase__ = checkpoint.split("/" )
lowercase__ = os.path.join(A , A )
elif add_prefix:
lowercase__ = checkpoint
lowercase__ = dump_path
else:
lowercase__ = None
lowercase__ = dump_path
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowercase__ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowercase__ = file_path.split(A )[-1][0]
if next_char == "/":
lowercase__ = os.path.join(A , A )
lowercase__ = None
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
lowercase__ = tokenizer.save_pretrained(
A , legacy_format=A , filename_prefix=A )
logger.info(f'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(A )
logger.info(f'''=> removing {file_name}''' )
if __name__ == "__main__":
lowerCAmelCase_: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCAmelCase_: Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 668 |
"""simple docstring"""
from typing import Any
import numpy as np
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
def __a ( A , A ):
'''simple docstring'''
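# Rayleigh quotient R(M, v) = (v* M v) / (v* v); for a Hermitian matrix M the
# quotient is real and lies between the smallest and largest eigenvalues of M.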
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_: List[Any] = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Dict = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = ["CLIPFeatureExtractor"]
lowerCAmelCase_: Optional[int] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: int = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_a ):
snake_case_ = ["torch", "scipy"]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "scipy"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "scipy"] )
| 668 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
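# Worked example of the 6-bit regrouping implemented above:
#   b"Man" -> bits 01001101 01100001 01101110 -> 010011 010110 000101 101110
#          -> charset indices 19, 22, 5, 46 -> b"TWFu"; decoding reverses this.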
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 1 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCAmelCase_: Optional[Any] = "src/diffusers"
# Matches is_xxx_available()
lowerCAmelCase_: int = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
lowerCAmelCase_: int = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
lowerCAmelCase_: Tuple = "\n{0} = None\n"
lowerCAmelCase_: Any = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
lowerCAmelCase_: Any = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
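# For example, DUMMY_CLASS.format("UNet2DModel", '["torch"]') renders a stub
# class whose __init__, from_config and from_pretrained all raise the
# "requires torch" error via requires_backends; the class name here is illustrative.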
def __a ( A ):
'''simple docstring'''
lowercase__ = _re_backend.findall(A )
if len(A ) == 0:
return None
return "_and_".join(A )
def __a ( ):
'''simple docstring'''
with open(os.path.join(A , "__init__.py" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.readlines()
# Get to the point we do the actual imports for type checking
lowercase__ = 0
lowercase__ = {}
# Go through the end of the file
while line_index < len(A ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
lowercase__ = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("else:" ):
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while line_index < len(A ) and len(lines[line_index] ) > 1:
lowercase__ = lines[line_index]
lowercase__ = _re_single_line_import.search(A )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(", " ) )
elif line.startswith(" " * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(A ) > 0:
lowercase__ = objects
else:
line_index += 1
return backend_specific_objects
def __a ( A , A ):
'''simple docstring'''
if name.isupper():
return DUMMY_CONSTANT.format(A )
elif name.islower():
return DUMMY_FUNCTION.format(A , A )
else:
return DUMMY_CLASS.format(A , A )
def __a ( A=None ):
'''simple docstring'''
if backend_specific_objects is None:
lowercase__ = read_init()
# Render each backend list in the bracketed form expected by requires_backends
lowercase__ = {}
for backend, objects in backend_specific_objects.items():
lowercase__ = "[" + ", ".join(f'''"{b}"''' for b in backend.split("_and_" ) ) + "]"
lowercase__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(A , A ) for o in objects] )
lowercase__ = dummy_file
return dummy_files
def __a ( A=False ):
'''simple docstring'''
lowercase__ = create_dummy_files()
# Map backend names to the shortcuts used in the utils/dummy_xxx_objects.py file names
lowercase__ = {"torch": "pt"}
# Locate actual dummy modules and read their content.
lowercase__ = os.path.join(A , "utils" )
lowercase__ = {
backend: os.path.join(A , f'''dummy_{short_names.get(A , A )}_objects.py''' )
for backend in dummy_files.keys()
}
lowercase__ = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(A ):
with open(A , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.read()
else:
lowercase__ = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f'''Updating diffusers.utils.dummy_{short_names.get(A , A )}_objects.py as the main '''
"__init__ has new objects." )
with open(dummy_file_paths[backend] , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
f'''diffusers.utils.dummy_{short_names.get(A , A )}_objects.py. Run `make fix-copies` '''
"to fix this." )
if __name__ == "__main__":
lowerCAmelCase_: Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
lowerCAmelCase_: List[str] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 668 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
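# Modified Newton-Raphson step: x_{n+1} = x_n - m * f(x_n) / f'(x_n), where m
# is the known multiplicity of the root; m = 1 recovers the classical method.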
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 668 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_: Any = logging.get_logger(__name__)
def __a ( A ):
'''simple docstring'''
lowercase__ = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
lowercase__ = 10_24
lowercase__ = 40_96
lowercase__ = 24
lowercase__ = 16
lowercase__ = [5, 11, 17, 23]
lowercase__ = [2_56, 5_12, 10_24, 10_24]
lowercase__ = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
lowercase__ = 7_68
lowercase__ = [1, 1, 1, 0.5]
lowercase__ = [2_56, 5_12, 7_68, 7_68]
lowercase__ = 1_50
lowercase__ = 16
lowercase__ = (1, 3_84, 3_84)
lowercase__ = False
lowercase__ = "project"
if "ade" in checkpoint_url:
lowercase__ = True
lowercase__ = 7_68
lowercase__ = [1, 1, 1, 0.5]
lowercase__ = 1_50
lowercase__ = 16
lowercase__ = "huggingface/label-files"
lowercase__ = "ade20k-id2label.json"
lowercase__ = json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
lowercase__ = {int(A ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def __a ( A ):
'''simple docstring'''
lowercase__ = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(A , A )
def __a ( A ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase__ = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
lowercase__ = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
lowercase__ = name.replace("patch_embed" , "" )
if "pos_embed" in name:
lowercase__ = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
lowercase__ = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase__ = name.replace("proj" , "projection" )
if "blocks" in name:
lowercase__ = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
lowercase__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
lowercase__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
lowercase__ = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
lowercase__ = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
lowercase__ = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
lowercase__ = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
lowercase__ = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
lowercase__ = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
lowercase__ = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
lowercase__ = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase__ = name.replace(f'''refinenet{layer_idx}''' , f'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
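# e.g. "neck.refinenet4.out_conv" (after the "scratch" -> "neck" rename above)
# becomes "neck.fusion_stage.layers.0.out_conv", since abs(4 - 4) == 0.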
if "out_conv" in name:
lowercase__ = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
lowercase__ = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
lowercase__ = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
lowercase__ = name.replace("conv1" , "convolution1" )
if "conv2" in name:
lowercase__ = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase__ = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase__ = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase__ = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase__ = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase__ = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase__ = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase__ = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase__ = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase__ = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase__ = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase__ = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase__ = name.replace("pretrained" , "dpt" )
if "bn" in name:
lowercase__ = name.replace("bn" , "batch_norm" )
if "head" in name:
lowercase__ = name.replace("head" , "head.head" )
if "encoder.norm" in name:
lowercase__ = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
lowercase__ = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
lowercase__ = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
lowercase__ = name.replace(".." , "." )
if "stem.conv" in name:
lowercase__ = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
lowercase__ = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
lowercase__ = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
lowercase__ = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
lowercase__ = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
lowercase__ = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
lowercase__ = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def __a ( A , A ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowercase__ = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[: config.hidden_size, :]
lowercase__ = in_proj_bias[: config.hidden_size]
lowercase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ = in_proj_weight[
-config.hidden_size :, :
]
lowercase__ = in_proj_bias[-config.hidden_size :]
def __a ( ):
'''simple docstring'''
lowercase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ , lowercase__ = get_dpt_config(A )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowercase__ = torch.load(A , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(A )
# rename keys
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(A )
lowercase__ = val
# read in qkv matrices
read_in_q_k_v(A , A )
# load HuggingFace model
lowercase__ = DPTForSemanticSegmentation(A ) if "ade" in checkpoint_url else DPTForDepthEstimation(A )
model.load_state_dict(A )
model.eval()
# Check outputs on an image
lowercase__ = 4_80 if "ade" in checkpoint_url else 3_84
lowercase__ = DPTImageProcessor(size=A )
lowercase__ = prepare_img()
lowercase__ = image_processor(A , return_tensors="pt" )
# forward pass
lowercase__ = model(**A ).logits if "ade" in checkpoint_url else model(**A ).predicted_depth
if show_prediction:
lowercase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=A , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(A )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(A )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
lowerCAmelCase_: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
lowerCAmelCase_: Optional[int] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
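# Illustrative invocation sketch (not part of the original file; the script
# filename and local paths are assumptions, only the flags come from the
# argparse setup above):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --show_prediction
#
# Note that despite its name, --checkpoint_url is passed straight to
# torch.load (the hub-download line above is commented out), so it must point
# at a checkpoint that is already on disk.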
| 668 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
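# Illustrative note (added): with this _LazyModule layout, importing the
# package is cheap; torch/tf/flax are only pulled in when one of the names
# above is first touched. A minimal sketch, assuming a working torch install
# so the OptionalDependencyNotAvailable branch is not taken:
#
#   from transformers.models import distilbert
#   model_cls = distilbert.DistilBertModel   # the torch import happens here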
| 668 | 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def __a ( A = True , *A , **A ):
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
lowercase__ = False
if main_process_only:
        lowercase__ = PartialState().local_process_index != 0  # disable the bar on every rank except the local main process
return _tqdm(*A , **A , disable=A )
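# Illustrative usage sketch (added; assumes accelerate and tqdm are installed,
# and that the wrapper above is exposed under the upstream name `tqdm`):
#
#   from accelerate.utils import tqdm
#   for batch in tqdm(True, dataloader, desc="train"):
#       ...
#
# With main_process_only=True every rank except local rank 0 gets a disabled
# bar, so a multi-GPU run prints one progress bar instead of one per process.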
| 668 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
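# Illustrative usage sketch (added; the class above is TvltFeatureExtractor
# upstream, hidden here by the mangled name `a__`; output shapes follow from
# the defaults feature_size=128, num_channels=1):
#
#   import numpy as np
#   extractor = TvltFeatureExtractor()
#   audio = np.random.randn(44_100).astype(np.float32)  # ~1 s of mono audio
#   batch = extractor(audio, sampling_rate=44_100, return_attention_mask=True)
#   # batch["audio_values"]: (1, 1, max_time_len, 128) padded log-mel features
#   # batch["audio_mask"]:   binary mask marking real (non-padded) patches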
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCAmelCase_: Optional[Any] = numpy.array([0, 0])
lowerCAmelCase_: Optional[Any] = numpy.array([0.5, 0.8_660_254])
lowerCAmelCase_: List[str] = numpy.array([1, 0])
lowerCAmelCase_: Any = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def __a ( A , A ):
'''simple docstring'''
lowercase__ = initial_vectors
for _ in range(A ):
lowercase__ = iteration_step(A )
return vectors
def __a ( A ):
'''simple docstring'''
lowercase__ = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase__ = vectors[i + 1]
new_vectors.append(A )
lowercase__ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def __a ( A , A ):
'''simple docstring'''
lowercase__ = numpy.radians(A )
lowercase__ , lowercase__ = numpy.cos(A ), numpy.sin(A )
lowercase__ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(A , A )
def __a ( A ):
'''simple docstring'''
lowercase__ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase__ , lowercase__ = zip(*A )
plt.plot(A , A )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_: Any = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
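# Added note: each iteration step replaces every segment with four shorter
# ones, so after n steps the curve has 3 * 4**n segments and 3 * 4**n + 1
# points. Quick sanity check for the run above:
#
#   assert len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1  # 3073 points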
| 668 |
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
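# Added note: this is Project Euler 46 (Goldbach's other conjecture): find the
# smallest odd composite that cannot be written as prime + 2*k**2. The
# while/else above uses Python's loop-else, so a number is collected only when
# the while loop runs to exhaustion without finding a decomposition. The
# published answer is 5777, so solution() is expected to print 5777.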
| 668 | 1 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
lowerCAmelCase_: Optional[Any] = get_logger(__name__)
lowerCAmelCase_: int = Path(__file__).parent / "model_card_template.md"
lowerCAmelCase_: Optional[int] = uuida().hex
lowerCAmelCase_: Optional[int] = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_: Dict = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
lowerCAmelCase_: str = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def __a ( A = None ):
'''simple docstring'''
lowercase__ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f'''; torch/{_torch_version}'''
if is_flax_available():
ua += f'''; jax/{_jax_version}'''
ua += f'''; flax/{_flax_version}'''
if is_onnx_available():
ua += f'''; onnxruntime/{_onnxruntime_version}'''
# CI will set this value to True
if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(A , A ):
ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() )
elif isinstance(A , A ):
ua += "; " + user_agent
return ua
def __a ( A , A = None , A = None ):
'''simple docstring'''
if token is None:
lowercase__ = HfFolder.get_token()
if organization is None:
lowercase__ = whoami(A )["name"]
return f'''{username}/{model_id}'''
else:
return f'''{organization}/{model_id}'''
def __a ( A , A ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
"Modelcard rendering is based on Jinja templates."
" Please make sure to have `jinja` installed before using `create_model_card`."
" To install it, please run `pip install Jinja2`." )
if hasattr(A , "local_rank" ) and args.local_rank not in [-1, 0]:
return
lowercase__ = args.hub_token if hasattr(A , "hub_token" ) else None
lowercase__ = get_full_repo_name(A , token=A )
lowercase__ = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=A , model_name=A , repo_name=A , dataset_name=args.dataset_name if hasattr(A , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(A , "gradient_accumulation_steps" ) else None
) , adam_betaa=args.adam_betaa if hasattr(A , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(A , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(A , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(A , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(A , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(A , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(A , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(A , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(A , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
lowercase__ = os.path.join(args.output_dir , "README.md" )
model_card.save(A )
def __a ( A , A = None ):
'''simple docstring'''
if resolved_file is None or commit_hash is not None:
return commit_hash
lowercase__ = str(Path(A ).as_posix() )
lowercase__ = re.search(r"snapshots/([^/]+)/" , A )
if search is None:
return None
lowercase__ = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(A ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
lowerCAmelCase_: Optional[int] = os.path.expanduser(
os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
lowerCAmelCase_: int = os.path.join(hf_cache_home, "diffusers")
def __a ( A = None , A = None ):
'''simple docstring'''
if new_cache_dir is None:
lowercase__ = DIFFUSERS_CACHE
if old_cache_dir is None:
lowercase__ = old_diffusers_cache
lowercase__ = Path(A ).expanduser()
lowercase__ = Path(A ).expanduser()
for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
lowercase__ = new_cache_dir / old_blob_path.relative_to(A )
new_blob_path.parent.mkdir(parents=A , exist_ok=A )
os.replace(A , A )
try:
os.symlink(A , A )
except OSError:
logger.warning(
"Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowerCAmelCase_: Tuple = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
lowerCAmelCase_: str = 0
else:
with open(cache_version_file) as f:
try:
lowerCAmelCase_: Dict = int(f.read())
except ValueError:
lowerCAmelCase_: int = 0
if cache_version < 1:
lowerCAmelCase_: Optional[int] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
lowerCAmelCase_: Optional[Any] = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"the directory exists and can be written to."
)
def __a ( A , A = None ):
'''simple docstring'''
if variant is not None:
lowercase__ = weights_name.split("." )
lowercase__ = splits[:-1] + [variant] + splits[-1:]
lowercase__ = ".".join(A )
return weights_name
def __a ( A , *,
A , A , A , A , A , A , A , A , A , A , A=None , ):
'''simple docstring'''
lowercase__ = str(A )
if os.path.isfile(A ):
return pretrained_model_name_or_path
elif os.path.isdir(A ):
if os.path.isfile(os.path.join(A , A ) ):
# Load from a PyTorch checkpoint
lowercase__ = os.path.join(A , A )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(A , A , A ) ):
lowercase__ = os.path.join(A , A , A )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(A ).base_version ) >= version.parse("0.20.0" )
):
try:
lowercase__ = hf_hub_download(
A , filename=_add_variant(A , A ) , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , user_agent=A , subfolder=A , revision=revision or commit_hash , )
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , A , )
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(A , A )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(A , A )}\' so that the correct variant file can be added.''' , A , )
try:
# 2. Load model file as usual
lowercase__ = hf_hub_download(
A , filename=A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , user_agent=A , subfolder=A , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
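# Added worked example for the variant helper above (upstream name
# _add_variant; pure string logic, checkable against the function body):
#
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   # -> "diffusion_pytorch_model.fp16.bin"
#   _add_variant("model.safetensors", None)
#   # -> "model.safetensors"  (variant=None returns the name unchanged)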
| 668 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
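# Illustrative sketch (added): this file follows the torch.hub entry-point
# convention, so the wrappers above are reachable without importing
# transformers directly. Repo and entry-point names below are assumptions
# based on that convention, not confirmed by this file:
#
#   import torch
#   tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")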
| 668 | 1 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def __a ( ):
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
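# Added note: the other basic gates fall out by composition, e.g. a NAND
# sketch built on the function above:
#
#   def nand_gate(input_a: int, input_b: int) -> int:
#       return int(not and_gate(input_a, input_b))
#
#   assert nand_gate(1, 1) == 0 and nand_gate(0, 1) == 1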
| 668 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 1 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A , A ):
'''simple docstring'''
if len(A ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(A )
or left < -len(A )
or right >= len(A )
or right < -len(A )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
lowercase__ = (left + right) >> 1 # the middle
lowercase__ = find_max(A , A , A ) # find max in range[left, mid]
lowercase__ = find_max(A , mid + 1 , A ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
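# Added note: this is the classic divide-and-conquer maximum, T(n) = 2T(n/2)
# + O(1), i.e. O(n) comparisons with O(log n) recursion depth. Example, using
# the upstream name find_max as in the recursive calls above:
#
#   nums = [3, 1, 4, 1, 5, 9, 2, 6]
#   assert find_max(nums, 0, len(nums) - 1) == 9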
| 668 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
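# Illustrative usage sketch (added; the config class above is
# Data2VecVisionConfig upstream, hidden here by the mangled name `a__`):
#
#   config = Data2VecVisionConfig(image_size=384, use_auxiliary_head=False)
#   config.num_hidden_layers  # -> 12 (default)
#
# Only the segmentation heads read out_indices / pool_scales / auxiliary_*;
# the plain encoder ignores them.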
| 668 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = RemBertConfig.from_json_file(A )
print("Building PyTorch model from configuration: {}".format(str(A ) ) )
lowercase__ = RemBertModel(A )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A , A , A )
# Save pytorch-model
print("Save PyTorch model to {}".format(A ) )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowerCAmelCase_: int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--rembert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained RemBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowerCAmelCase_: str = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
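# Illustrative invocation sketch (added; script filename and local paths are
# assumptions, the flags come from the argparse definitions above):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin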
| 668 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
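# Illustrative usage sketch (added; the class above is MarkupLMConfig
# upstream):
#
#   config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
#
# The xpath-specific fields (max_depth, tag/subs vocab sizes and pad ids)
# size the extra XPath embeddings that distinguish MarkupLM from a plain
# BERT-style encoder.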
| 668 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __a ( A ):
'''simple docstring'''
lowercase__ , lowercase__ = image.size
lowercase__ , lowercase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
lowercase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
lowercase__ = np.array(A ).astype(np.floataa ) / 255.0
lowercase__ = image[None].transpose(0 , 3 , 1 , 2 )
lowercase__ = torch.from_numpy(A )
return 2.0 * image - 1.0
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_UpperCAmelCase, unet=_UpperCAmelCase, scheduler=_UpperCAmelCase )
@torch.no_grad()
def __call__( self, _UpperCAmelCase = None, _UpperCAmelCase = 1, _UpperCAmelCase = 100, _UpperCAmelCase = 0.0, _UpperCAmelCase = None, _UpperCAmelCase = "pil", _UpperCAmelCase = True, ):
'''simple docstring'''
if isinstance(_UpperCAmelCase, PIL.Image.Image ):
lowercase__ = 1
elif isinstance(_UpperCAmelCase, torch.Tensor ):
lowercase__ = image.shape[0]
else:
raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_UpperCAmelCase )}''' )
if isinstance(_UpperCAmelCase, PIL.Image.Image ):
lowercase__ = preprocess(_UpperCAmelCase )
lowercase__ , lowercase__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowercase__ = (batch_size, self.unet.config.in_channels // 2, height, width)
lowercase__ = next(self.unet.parameters() ).dtype
lowercase__ = randn_tensor(_UpperCAmelCase, generator=_UpperCAmelCase, device=self.device, dtype=_UpperCAmelCase )
lowercase__ = image.to(device=self.device, dtype=_UpperCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_UpperCAmelCase, device=self.device )
lowercase__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
for t in self.progress_bar(_UpperCAmelCase ):
# concat latents and low resolution image in the channel dimension.
lowercase__ = torch.cat([latents, image], dim=1 )
lowercase__ = self.scheduler.scale_model_input(_UpperCAmelCase, _UpperCAmelCase )
# predict the noise residual
lowercase__ = self.unet(_UpperCAmelCase, _UpperCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
# decode the image latents with the VQVAE
lowercase__ = self.vqvae.decode(_UpperCAmelCase ).sample
lowercase__ = torch.clamp(_UpperCAmelCase, -1.0, 1.0 )
lowercase__ = image / 2 + 0.5
lowercase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCAmelCase )
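# Illustrative usage sketch (added; the pipeline above is
# LDMSuperResolutionPipeline upstream, and the checkpoint id below is the one
# used in the diffusers docs, treated as an assumption here):
#
#   from PIL import Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages"
#   )
#   low_res = Image.open("input.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]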
| 668 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 1 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCAmelCase_: List[Any] = Mapping[str, np.ndarray]
lowerCAmelCase_: Tuple = Mapping[str, Any] # Is a nested dict.
lowerCAmelCase_: str = 0.01
@dataclasses.dataclass(frozen=_a )
class a__ :
snake_case_ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
snake_case_ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
snake_case_ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
snake_case_ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
snake_case_ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
snake_case_ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
snake_case_ = None
# Templates used to generate this protein (prediction-only)
snake_case_ = None
# Chain corresponding to each parent
snake_case_ = None
def __a ( A ):
'''simple docstring'''
lowercase__ = r"(\[[A-Z]+\]\n)"
lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0]
lowercase__ = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
lowercase__ = ["N", "CA", "C"]
lowercase__ = None
lowercase__ = None
lowercase__ = None
for g in groups:
if "[PRIMARY]" == g[0]:
lowercase__ = g[1][0].strip()
for i in range(len(A ) ):
if seq[i] not in residue_constants.restypes:
lowercase__ = "X" # FIXME: strings are immutable
lowercase__ = np.array(
[residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
lowercase__ = []
for axis in range(3 ):
tertiary.append(list(map(A , g[1][axis].split() ) ) )
lowercase__ = np.array(A )
lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
lowercase__ = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
lowercase__ = np.zeros(
(
len(A ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(A ):
lowercase__ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , )
def __a ( A , A = 0 ):
'''simple docstring'''
lowercase__ = []
lowercase__ = prot.remark
if remark is not None:
pdb_headers.append(f'''REMARK {remark}''' )
lowercase__ = prot.parents
lowercase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase__ = [p for i, p in zip(A , A ) if i == chain_id]
if parents is None or len(A ) == 0:
lowercase__ = ["N/A"]
pdb_headers.append(f'''PARENT {' '.join(A )}''' )
return pdb_headers
def __a ( A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = pdb_str.split("\n" )
lowercase__ = prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
lowercase__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
lowercase__ = []
if prot.parents_chain_index is not None:
lowercase__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(A ) , [] )
parent_dict[str(A )].append(A )
lowercase__ = max([int(A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
lowercase__ = parent_dict.get(str(A ) , ["N/A"] )
parents_per_chain.append(A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
lowercase__ = [["N/A"]]
def make_parent_line(A ) -> str:
return f'''PARENT {' '.join(A )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
lowercase__ = 0
for i, l in enumerate(A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(A ):
lowercase__ = parents_per_chain[chain_counter]
else:
lowercase__ = ["N/A"]
out_pdb_lines.append(make_parent_line(A ) )
return "\n".join(A )
def __a ( A ):
'''simple docstring'''
lowercase__ = residue_constants.restypes + ["X"]
def res_atoa(A ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , "UNK" )
lowercase__ = residue_constants.atom_types
lowercase__ = []
lowercase__ = prot.atom_mask
lowercase__ = prot.aatype
lowercase__ = prot.atom_positions
lowercase__ = prot.residue_index.astype(np.intaa )
lowercase__ = prot.b_factors
lowercase__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("Invalid aatypes." )
lowercase__ = get_pdb_headers(A )
if len(A ) > 0:
pdb_lines.extend(A )
lowercase__ = aatype.shape[0]
lowercase__ = 1
lowercase__ = 0
lowercase__ = string.ascii_uppercase
lowercase__ = None
# Add all atom sites.
for i in range(A ):
lowercase__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
lowercase__ = "ATOM"
lowercase__ = atom_name if len(A ) == 4 else f''' {atom_name}'''
lowercase__ = ""
lowercase__ = ""
lowercase__ = 1.00
lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works.
lowercase__ = ""
lowercase__ = "A"
if chain_index is not None:
lowercase__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
lowercase__ = (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1} '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f} '''
f'''{element:>2}{charge:>2}'''
)
pdb_lines.append(A )
atom_index += 1
lowercase__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
lowercase__ = True
lowercase__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
lowercase__ = "TER"
lowercase__ = (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(A , A ) )
pdb_lines.append("END" )
pdb_lines.append("" )
return "\n".join(A )
def __a ( A ):
'''simple docstring'''
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __a ( A , A , A = None , A = None , A = None , A = None , A = None , ):
'''simple docstring'''
return Protein(
aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
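# Illustrative usage sketch (added): round-tripping a model prediction into
# PDB text with the helpers above (upstream names from_prediction / to_pdb,
# mangled to `__a` here):
#
#   protein = from_prediction(features, result, b_factors=plddt_b_factors)
#   pdb_string = to_pdb(protein)
#   with open("prediction.pdb", "w") as f:
#       f.write(pdb_string)
#
# features/result are the model input and output dicts; everything here is
# plain numpy, so no deep-learning framework is needed at this stage.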
| 668 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
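# Added worked example (follows directly from the arithmetic above; upstream
# name allocation_num):
#
#   allocation_num(100, 4)  # -> ['1-25', '26-50', '51-75', '76-100']
#   allocation_num(10, 3)   # -> ['1-3', '4-6', '7-10']  (last takes remainder)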
| 668 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
                # put the unfinished process back at the end of the queue
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
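# Worked trace (added, derived from the demo parameters below): with time slices
# [17, 25] and a final FCFS queue, a process with burst time 53 runs 17 ticks in
# queue 0 and 25 ticks in queue 1, then finishes its remaining 11 ticks under FCFS.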
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 668 | 1 |
"""simple docstring"""
from functools import lru_cache
def __a ( A ):
'''simple docstring'''
lowercase__ = 2
lowercase__ = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(A )
if n > 1:
factors.add(A )
return factors
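# Example (added): unique_prime_factors(644) == {2, 7, 23}, since 644 = 2**2 * 7 * 23.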
@lru_cache
def __a ( A ):
'''simple docstring'''
return len(unique_prime_factors(A ) )
def __a ( A ):
'''simple docstring'''
return len(set(A ) ) in (0, 1)
def __a ( A ):
'''simple docstring'''
lowercase__ = 2
while True:
# Increment each value of a generated range
lowercase__ = [base + i for i in range(A )]
        # Run each element through our unique_prime_factors function
        # and append our target count to the end.
lowercase__ = [upf_len(A ) for x in group]
checker.append(A )
# If all numbers in the list are equal, return the group variable.
if equality(A ):
return group
# Increment our base variable by 1
base += 1
def __a ( A = 4 ):
'''simple docstring'''
lowercase__ = run(A )
return results[0] if len(A ) else None
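# Note (added): with the default n = 4 this prints 134043, the published Project Euler 47
# answer; 134043..134046 are the first four consecutive integers with four distinct
# prime factors each.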
if __name__ == "__main__":
print(solution())
| 668 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
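    # Note (added): the expected ids above follow ByT5's byte-level scheme: each UTF-8
    # byte is shifted past the three special tokens (pad=0, eos=1, unk=2), so "U"
    # (byte 0x55 == 85) becomes 85 + 3 == 88, and every sequence ends with eos id 1.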
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class a__ ( _lowerCamelCase ):
snake_case_ = "dpt"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=384, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=[2, 5, 8, 11], _UpperCAmelCase="project", _UpperCAmelCase=[4, 2, 1, 0.5], _UpperCAmelCase=[96, 192, 384, 768], _UpperCAmelCase=256, _UpperCAmelCase=-1, _UpperCAmelCase=False, _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=255, _UpperCAmelCase=0.1, _UpperCAmelCase=[1, 1024, 24, 24], _UpperCAmelCase=[0, 1], _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**A__ )
lowercase__ = hidden_size
lowercase__ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
lowercase__ = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
lowercase__ = BitConfig(**A__ )
elif isinstance(A__, A__ ):
logger.info("Initializing the config with a `BiT` backbone." )
lowercase__ = BitConfig(**A__ )
elif isinstance(A__, A__ ):
lowercase__ = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
lowercase__ = backbone_featmap_shape
lowercase__ = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
else:
lowercase__ = None
lowercase__ = None
lowercase__ = []
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
lowercase__ = readout_type
lowercase__ = reassemble_factors
lowercase__ = neck_hidden_sizes
lowercase__ = fusion_hidden_size
lowercase__ = head_in_index
lowercase__ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = semantic_loss_ignore_index
lowercase__ = semantic_classifier_dropout
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.__class__.model_type
return output
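# Note (added): `to_dict` above serializes the nested BiT backbone config explicitly so
# that hybrid DPT checkpoints round-trip through JSON; non-hybrid configs keep
# `backbone_config` as None and skip that branch.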
| 700 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
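    # Note (added): the fixed scores asserted above are reproducible because the
    # tiny-random checkpoint ships frozen weights; `frame_sampling_rate=4` presumably
    # subsamples the decoded frames before they reach the feature extractor.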
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
def __a ( A = 4_00_00_00 ):
'''simple docstring'''
lowercase__ = []
lowercase__ , lowercase__ = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__A )
lowercase__ , lowercase__ = b, a + b
return sum(__A )
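# Example (added): solution(34) == 44, since the even Fibonacci numbers up to 34 are
# 2, 8 and 34.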
if __name__ == "__main__":
print(F'{solution() = }')
| 701 |
"""simple docstring"""
import itertools
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
    # All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A ) )
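# Example (added): solution(6) == 13, the sixth prime; the default n = 1_00_01 yields
# 104743, the published Project Euler 7 answer.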
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
class a__ ( metaclass=snake_case__ ):
snake_case_ = ["""torch""", """transformers""", """onnx"""]
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["torch", "transformers", "onnx"] )
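# Note (added): every dummy class above fails fast through `requires_backends`, raising
# an informative ImportError at instantiation or via its classmethods when any of torch,
# transformers or onnx is missing, instead of erroring at import time.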
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
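# Hedged usage sketch (added; the public names are assumptions, since this file
# obfuscates them):
#   from datasets.io.text import TextDatasetReader
#   ds = TextDatasetReader("data.txt", split="train").read()
# `read()` either returns a streaming dataset or downloads, prepares and loads a
# map-style one, exactly as implemented in the method directly above.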
| 668 | 0 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowerCAmelCase_: Tuple = logging.get_logger(__name__)
lowerCAmelCase_: str = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
lowerCAmelCase_: Any = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
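# Note (added): MAPPING keys containing "*" are templated per encoder layer; the loading
# loop below extracts the layer index from the fairseq parameter name and substitutes it
# into the "*" slot before copying the tensor.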
def __a ( A , A , A , A , A ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase__ = getattr(A , A )
if weight_type is not None:
lowercase__ = getattr(A , A ).shape
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
for key, mapped_key in MAPPING.items():
lowercase__ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(A )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , A )
if "weight_g" in name:
lowercase__ = "weight_g"
elif "weight_v" in name:
lowercase__ = "weight_v"
elif "bias" in name:
lowercase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = "weight"
else:
lowercase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(A )
@torch.no_grad()
def __a ( A , A , A=None , A=None , A=True ):
'''simple docstring'''
if config_path is not None:
lowercase__ = UniSpeechSatConfig.from_pretrained(A )
else:
lowercase__ = UniSpeechSatConfig()
lowercase__ = ""
if is_finetuned:
lowercase__ = UniSpeechSatForCTC(A )
else:
lowercase__ = UniSpeechSatForPreTraining(A )
lowercase__ , lowercase__ , lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
lowercase__ = model[0].eval()
recursively_load_weights(A , A )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
lowerCAmelCase_: List[str] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
lowerCAmelCase_: Dict = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
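# Minimal sketch (added; parameter names are assumptions, since arguments are obfuscated
# to `A` here) of how the helper above is driven, mirroring main() below:
#   config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size, model_name_or_path)
# The accelerator argument is consumed only inside collate_fn, to switch to fixed-length
# padding on TPU.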
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
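# Note (added): the `samples_seen` bookkeeping above trims the duplicate examples that
# `accelerator.gather` pads onto the last batch in distributed runs, so the metric sees
# each validation sample exactly once.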
def __a ( ):
'''simple docstring'''
    lowercase__ = argparse.ArgumentParser(description="Simple example of a training script that tracks and checks best evaluation performance." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 668 | 0 |
"""simple docstring"""
def __a ( A , A ):
'''simple docstring'''
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) )
else:
return a * actual_power(_lowercase , int(b / 2 ) ) * actual_power(_lowercase , int(b / 2 ) )
def __a ( A , A ):
'''simple docstring'''
if b < 0:
return 1 / actual_power(_lowercase , _lowercase )
return actual_power(_lowercase , _lowercase )
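# Note (added): `int(b / 2)` truncates toward zero, so the recursion above also
# terminates for the negative exponents that `power` forwards; e.g. power(2, -3)
# evaluates to 1 / actual_power(2, -3) == 1 / 8 == 0.125.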
if __name__ == "__main__":
print(power(-2, -3))
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
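    # Note (added): `full_loop` above iterates the timestep schedule twice, presumably
    # because IPNDM is a multistep method; the second pass exercises the scheduler with
    # a fully populated history of past residuals.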
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_: Dict = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[str] = [
"VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMAEForPreTraining",
"ViTMAELayer",
"ViTMAEModel",
"ViTMAEPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: str = [
"TFViTMAEForPreTraining",
"TFViTMAEModel",
"TFViTMAEPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase_: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
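    # Note (added): `_LazyModule` defers the heavy torch/TF imports declared above until
    # an attribute is first accessed, so importing this package stays cheap when only
    # the config is needed.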
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
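    # Note (added): MGP-STR tokenizes at the character level over the vocab built in
    # setUp, so "tester" round-trips through encode/decode unchanged; hence the
    # identical input/output pair above.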
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 0 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_: str = logging.get_logger()
@dataclass
class a__ :
snake_case_ = 42
snake_case_ = field(default_factory=UpperCAmelCase_ )
snake_case_ = field(default_factory=UpperCAmelCase_ )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = len(list(m.modules() ) ) == 1 or isinstance(_lowercase, nn.Convad ) or isinstance(_lowercase, nn.BatchNormad )
if has_not_submodules:
self.traced.append(_lowercase )
def __call__( self, _UpperCAmelCase ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_lowercase )
[x.remove() for x in self.handles]
return self
@property
def snake_case__ ( self ):
'''simple docstring'''
return list(filter(lambda _UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class a__ :
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0
snake_case_ = field(default_factory=UpperCAmelCase_ )
snake_case_ = field(default_factory=UpperCAmelCase_ )
def __call__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = Tracker(self.dest )(_lowercase ).parametrized
lowercase__ = Tracker(self.src )(_lowercase ).parametrized
lowercase__ = list(filter(lambda _UpperCAmelCase : type(_lowercase ) not in self.src_skip, _lowercase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : type(_lowercase ) not in self.dest_skip, _lowercase ) )
if len(_lowercase ) != len(_lowercase ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(_lowercase )} operations while'''
F''' destination module has {len(_lowercase )}.''' )
for dest_m, src_m in zip(_lowercase, _lowercase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
            print(F'''Transferred from={src_m} to={dest_m}''' )
def __a ( A , A , A , A = True ):
'''simple docstring'''
print(f'''Converting {name}...''' )
with torch.no_grad():
lowercase__ = timm.create_model(A , pretrained=A ).eval()
lowercase__ = ResNetForImageClassification(A ).eval()
lowercase__ = ModuleTransfer(src=A , dest=A )
lowercase__ = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(A )
assert torch.allclose(from_model(A ) , our_model(A ).logits ), "The model logits don't match the original one."
lowercase__ = f'''resnet{'-'.join(name.split('resnet' ) )}'''
print(A )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=A , )
# we can use the convnext one
lowercase__ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=A , )
print(f'''Pushed {checkpoint_name}''' )
def __a ( A , A = None , A = True ):
'''simple docstring'''
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 10_00
lowercase__ = (1, num_labels)
lowercase__ = 'huggingface/label-files'
lowercase__ = num_labels
lowercase__ = json.load(open(hf_hub_download(A , A , repo_type="dataset" ) , "r" ) )
lowercase__ = {int(A ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = partial(A , num_labels=A , idalabel=A , labelaid=A )
lowercase__ = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(A , names_to_config[model_name] , A , A )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(A , A , A , A )
return config, expected_shape
if __name__ == "__main__":
lowerCAmelCase_: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
lowerCAmelCase_: Union[str, Any] = parser.parse_args()
lowerCAmelCase_: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 707 |
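Two techniques carry the conversion script above: forward hooks that record which leaf modules actually execute, and pairwise `state_dict` copying between the traced source and destination modules. A self-contained sketch of that mechanism on two made-up networks (the architectures below are illustrative only):

import torch
import torch.nn as nn


def trace_leaf_modules(model: nn.Module, x: torch.Tensor) -> list:
    """Run one forward pass and record every executed leaf module, in order."""
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    with torch.no_grad():
        model(x)
    for h in handles:
        h.remove()
    return traced


src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
x = torch.randn(1, 3, 32, 32)

# Keep only modules that actually hold parameters, then copy weights pairwise.
src_ops = [m for m in trace_leaf_modules(src, x) if list(m.state_dict())]
dst_ops = [m for m in trace_leaf_modules(dst, x) if list(m.state_dict())]
assert len(src_ops) == len(dst_ops), "source and destination do not line up"
for d, s in zip(dst_ops, src_ops):
    d.load_state_dict(s.state_dict())

assert torch.allclose(src(x), dst(x))  # identical outputs after the transfer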
"""simple docstring"""
from typing import Any
import numpy as np
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 668 | 0 |
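De-obfuscated, the snippet above checks hermiticity (a matrix equal to its conjugate transpose) and evaluates the Rayleigh quotient R(A, v) = (v*Av)/(v*v), which is real for Hermitian A and bounded by A's extreme eigenvalues. A clean, runnable restatement:

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    # A matrix is Hermitian when it equals its own conjugate transpose.
    return np.array_equal(matrix, matrix.conj().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> float:
    """R(a, v) = (v* a v) / (v* v); real (up to rounding) when a is Hermitian."""
    v_star = v.conj().T
    num = (v_star @ a @ v).item()
    den = (v_star @ v).item()
    return (num / den).real


a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
v = np.array([[1], [2], [3]])
assert is_hermitian(a)
print(rayleigh_quotient(a, v))  # 55/14, between the extreme eigenvalues of a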
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowerCAmelCase_: Optional[int] = logging.get_logger(__name__)
lowerCAmelCase_: Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_: Tuple = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
lowerCAmelCase_: Optional[int] = {"allegro/herbert-base-cased": 5_1_4}
lowerCAmelCase_: List[Any] = {}
class a__ ( __A ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = HerbertTokenizer
def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase="<s>", _UpperCAmelCase="<unk>", _UpperCAmelCase="<pad>", _UpperCAmelCase="<mask>", _UpperCAmelCase="</s>", **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, _UpperCAmelCase, tokenizer_file=_UpperCAmelCase, cls_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(_UpperCAmelCase, name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 708 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
| 668 | 0 |
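The integration tests above follow a standard regression pattern: seed the RNG, build deterministic inputs, run a forward pass, and compare a small output slice against hard-coded expected values. The pattern in miniature (the linear layer and shapes are illustrative):

import torch


def get_seeded_input(seed: int, shape=(1, 8)) -> torch.Tensor:
    torch.manual_seed(seed)  # same seed -> bit-identical tensor on every call
    return torch.randn(shape)


layer = torch.nn.Linear(8, 4)
with torch.no_grad():
    out_a = layer(get_seeded_input(13))
    out_b = layer(get_seeded_input(13))

# Deterministic inputs make output slices stable enough to hard-code as
# regression values, exactly as the expected_slice tensors above do.
assert torch.equal(out_a, out_b)
print(out_a[0, :4])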
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_: List[Any] = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: int = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 0 |
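The encoder above works directly on the bit stream: bytes become a binary string, the stream is zero-padded to a multiple of 6 bits, each 6-bit group indexes the charset, and `=` padding records how many filler bits were added. A condensed version of the same algorithm, sanity-checked against the standard library (this harness is an addition, not part of the original module):

import base64

B64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def encode(data: bytes) -> bytes:
    bits = "".join(bin(b)[2:].zfill(8) for b in data)
    # One '=' per pair of filler bits that will be appended below.
    pad = b"=" * ((6 - len(bits) % 6) // 2) if len(bits) % 6 else b""
    bits += "0" * (-len(bits) % 6)  # pad the bit stream to a multiple of 6
    out = "".join(B64[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
    return out.encode() + pad


for sample in [b"", b"f", b"fo", b"foo", b"foobar", bytes(range(256))]:
    assert encode(sample) == base64.b64encode(sample), sample
print("manual encoder matches base64.b64encode")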
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_: List[str] = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
    # Find root of cos(x); start away from 0, where the derivative -sin(x) vanishes
    print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 1)}')
| 668 | 0 |
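Stripped of the sympy machinery, the iteration above is x_{k+1} = x_k - m * f(x_k) / f'(x_k), stopping once consecutive guesses agree within the requested precision. A plain-Python sketch with explicit derivatives and a bounded iteration count (the example functions are illustrative):

import math
from typing import Callable


def newton_raphson(
    f: Callable[[float], float],
    df: Callable[[float], float],
    x0: float,
    precision: float = 1e-10,
    max_iter: int = 100,
) -> float:
    """Find a root of f near x0 via x <- x - f(x) / f'(x)."""
    x = x0
    for _ in range(max_iter):
        d = df(x)
        if d == 0:
            raise ZeroDivisionError("derivative vanished; cannot continue")
        x_next = x - f(x) / d
        # Convergence check: consecutive guesses agree to the given precision.
        if abs(x_next - x) < precision:
            return x_next
        x = x_next
    raise RuntimeError("did not converge within max_iter iterations")


# Root of sin(x) near 2 is pi; the fourth root of 5 solves x**4 - 5 = 0.
print(newton_raphson(math.sin, math.cos, 2.0))                      # ~3.14159...
print(newton_raphson(lambda x: x**4 - 5, lambda x: 4 * x**3, 1.0))  # ~1.49534...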
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
lowerCAmelCase_: int = get_logger(__name__)
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self, __A, getattr(__A, __A ) )
lowercase__ = module._original_module if isinstance(__A, _PatchedModuleObj ) else module
class a__ :
snake_case_ = []
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = obj
lowercase__ = target
lowercase__ = new
lowercase__ = target.split("." )[0]
lowercase__ = {}
lowercase__ = attrs or []
def __enter__( self ):
'''simple docstring'''
lowercase__ = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(__A ) ):
try:
lowercase__ = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
lowercase__ = getattr(self.obj, __A )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(__A, _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
lowercase__ = obj_attr
# patch at top level
setattr(self.obj, __A, _PatchedModuleObj(__A, attrs=self.attrs ) )
lowercase__ = getattr(self.obj, __A )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(__A, __A, _PatchedModuleObj(getattr(__A, __A, __A ), attrs=self.attrs ) )
lowercase__ = getattr(__A, __A )
# finally set the target attribute
setattr(__A, __A, self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
lowercase__ = getattr(import_module(".".join(__A ) ), __A )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, __A ) is attr_value:
lowercase__ = getattr(self.obj, __A )
setattr(self.obj, __A, self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
lowercase__ = globals()["__builtins__"][target_attr]
setattr(self.obj, __A, self.new )
else:
raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''' )
def __exit__( self, *_UpperCAmelCase ):
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj, __A, self.original.pop(__A ) )
def snake_case__ ( self ):
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def snake_case__ ( self ):
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 711 |
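In plain terms, the class above is a re-entrant `patch_submodule`: it walks a dotted target such as `os.path.join`, wraps each intermediate module so the final attribute can be swapped, and restores everything on exit. Its extra value over `unittest.mock.patch` is handling aliased imports like `from os.path import join as pjoin`; for the simple dotted case the stdlib is enough, as this small sketch shows:

import os
from unittest.mock import patch


def build_path(*parts: str) -> str:
    # Looks up os.path.join at call time, so a patched attribute is picked up.
    return os.path.join(*parts)


# Temporarily replace os.path.join; the original is restored on exit.
with patch("os.path.join", lambda *parts: "/".join(parts)):
    assert build_path("a", "b") == "a/b"

# Outside the context manager, the real implementation is back.
assert build_path("a", "b") == os.path.join("a", "b")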
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
"""simple docstring"""
from manim import *
class a__ ( UpperCamelCase_ ):
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = Rectangle(height=0.5, width=0.5 )
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*_a ).arrange(_a, buff=0 )
lowercase__ = VGroup(*_a ).arrange(_a, buff=0 )
lowercase__ = VGroup(_a, _a ).arrange(_a, buff=0 )
lowercase__ = Text("CPU", font_size=24 )
lowercase__ = Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
lowercase__ = [mem.copy() for i in range(1 )]
lowercase__ = VGroup(*_a ).arrange(_a, buff=0 )
lowercase__ = Text("GPU", font_size=24 )
lowercase__ = Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
gpu.align_to(_a, _a )
gpu.set_x(gpu.get_x() - 1 )
self.add(_a )
lowercase__ = [mem.copy() for i in range(6 )]
lowercase__ = VGroup(*_a ).arrange(_a, buff=0 )
lowercase__ = Text("Model", font_size=24 )
lowercase__ = Group(_a, _a ).arrange(_a, buff=0.5, aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.play(
Create(_a, run_time=1 ), Create(_a, run_time=1 ), Create(_a, run_time=1 ), )
lowercase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''', font_size=24, )
lowercase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_a, run_time=2.5 ), Write(_a ), Write(_a ) )
self.add(_a )
lowercase__ = []
lowercase__ = []
lowercase__ = []
for i, rect in enumerate(_a ):
lowercase__ = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(_a, opacity=0.7 )
cpu_target.move_to(_a )
cpu_target.generate_target()
lowercase__ = 0.46 / 4
lowercase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=_a )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=_a, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=_a, buff=0.0 )
cpu_targs.append(_a )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_a ) )
second_animations.append(MoveToTarget(_a, run_time=1.5 ) )
self.play(*_a )
self.play(*_a )
self.wait()
| 712 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
| 668 | 0 |
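The extractor's core transform is: STFT power spectrogram, mel filter bank, dB scaling, then a fixed affine normalization. That last step, the `log_spec - 20.0` and `np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0` lines above, maps an 80 dB dynamic range into [-1, 1] and is easy to verify in isolation:

import numpy as np


def normalize_db_spectrogram(log_spec_db: np.ndarray) -> np.ndarray:
    """Map a dB-scaled mel spectrogram into [-1, 1].

    Mirrors the arithmetic above: shift by -20 dB, scale by 1/40,
    clip to [-2, 0], then shift up by 1. Values >= 20 dB map to 1.0,
    values <= -60 dB map to -1.0.
    """
    out = (log_spec_db - 20.0) / 40.0
    return np.clip(out, -2.0, 0.0) + 1.0


demo = np.linspace(-60.0, 20.0, num=5)  # pretend dB values
print(normalize_db_spectrogram(demo))   # [-1.  -0.5  0.   0.5  1. ]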
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=_snake_case ):
snake_case_ = ['note_seq']
def __init__( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(self, ["note_seq"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["note_seq"] )
@classmethod
def snake_case__ ( cls, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
requires_backends(cls, ["note_seq"] )
| 713 |
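The dummy class above is how the library degrades gracefully when an optional backend (`note_seq`) is missing: the name still imports everywhere, but constructing or using it raises a clear, actionable error instead of an opaque `ImportError` at import time. A minimal reimplementation of the pattern (`requires_backends` below is a hard-coded stand-in for the real availability check):

class OptionalBackendError(ImportError):
    pass


def requires_backends(obj, backends):
    # The real helper checks importability; this stand-in assumes "missing".
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise OptionalBackendError(
        f"{name} requires the {', '.join(backends)} backend(s); please install them."
    )


class MidiProcessor:  # stand-in dummy, mirroring the class above
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])


try:
    MidiProcessor()
except OptionalBackendError as err:
    print(err)  # clear message at use time, not a crash at import time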
"""simple docstring"""
from __future__ import annotations
import math
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
lowerCAmelCase_: Optional[Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
raise ValueError("n must be an integer" )
if n <= 0:
raise ValueError("n must be >= 0" )
lowercase__ = []
for num in range(len(A ) ):
lowercase__ = 0
while 2 * i * i <= odd_composites[num]:
lowercase__ = odd_composites[num] - 2 * i * i
if is_prime(A ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(A ) == n:
return list_nums
return []
def __a ( ):
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
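The search above is Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite that cannot be written as a prime plus twice a square. A compact restatement that is easier to follow than the precomputed-list version:

from itertools import count
from math import isqrt


def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, isqrt(n) + 1))


def smallest_counterexample() -> int:
    """Smallest odd composite not expressible as prime + 2 * k**2."""
    for n in count(9, 2):  # odd numbers from 9 upward
        if is_prime(n):
            continue
        # Try every k with 2*k*k <= n; if no decomposition works, n is it.
        if not any(is_prime(n - 2 * k * k) for k in range(1, isqrt(n // 2) + 1)):
            return n


print(smallest_counterexample())  # 5777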
"""simple docstring"""
import heapq
import sys
import numpy as np
lowerCAmelCase_: List[Any] = tuple[int, int]
class a__ :
def __init__( self ):
'''simple docstring'''
lowercase__ = []
lowercase__ = set()
def snake_case__ ( self ):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float("inf" )
def snake_case__ ( self ):
'''simple docstring'''
return len(self.elements ) == 0
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements, (priority, item) )
self.set.add(_UpperCAmelCase )
else:
# update
# print("update", item)
lowercase__ = []
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements, (pro, xxx) )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
if item in self.set:
self.set.remove(_UpperCAmelCase )
lowercase__ = []
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements, (prito, yyy) )
def snake_case__ ( self ):
'''simple docstring'''
return self.elements[0][1]
def snake_case__ ( self ):
'''simple docstring'''
((lowercase__) , (lowercase__)) = heapq.heappop(self.elements )
self.set.remove(_UpperCAmelCase )
return (priority, item)
def __a ( A , A ):
'''simple docstring'''
lowercase__ = np.array(lowerCAmelCase__ )
lowercase__ = np.array(lowerCAmelCase__ )
return np.linalg.norm(a - b )
def __a ( A , A ):
'''simple docstring'''
return consistent_heuristic(lowerCAmelCase__ , lowerCAmelCase__ ) // t
def __a ( A , A ):
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __a ( A , A , A , A ):
'''simple docstring'''
lowercase__ = g_function[start] + Wa * heuristics[i](lowerCAmelCase__ , lowerCAmelCase__ )
return ans
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = np.chararray((n, n) )
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
lowercase__ = "*"
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (j, (n - 1) - i) in blocks:
lowercase__ = "#"
lowercase__ = "-"
lowercase__ = back_pointer[goal]
while x != start:
((lowercase__) , (lowercase__)) = x
# print(x)
lowercase__ = "-"
lowercase__ = back_pointer[x]
lowercase__ = "-"
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
lowercase__ = back_pointer[goal]
while x != start:
print(lowerCAmelCase__ , end=" " )
lowercase__ = back_pointer[x]
print(lowerCAmelCase__ )
sys.exit()
def __a ( A ):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __a ( A , A , A , A , A , A , A , A , ):
'''simple docstring'''
for itera in range(lowerCAmelCase__ ):
open_list[itera].remove_element(lowerCAmelCase__ )
# print("s", s)
# print("j", j)
((lowercase__) , (lowercase__)) = s
lowercase__ = (x - 1, y)
lowercase__ = (x + 1, y)
lowercase__ = (x, y + 1)
lowercase__ = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(lowerCAmelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(lowerCAmelCase__ )
lowercase__ = -1
lowercase__ = float("inf" )
if valid(lowerCAmelCase__ ) and g_function[neighbours] > g_function[s] + 1:
lowercase__ = g_function[s] + 1
lowercase__ = s
if neighbours not in close_list_anchor:
open_list[0].put(lowerCAmelCase__ , key(lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , lowerCAmelCase__ ):
if key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) <= Wa * key(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ ):
open_list[j].put(
lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( ):
'''simple docstring'''
lowercase__ = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
lowerCAmelCase_: Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
lowerCAmelCase_: Tuple = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(1_0, 1),
(1_1, 1),
(1_2, 1),
(1_3, 1),
(1_4, 1),
(1_5, 1),
(1_6, 1),
(1_7, 1),
(1_8, 1),
(1_9, 1),
]
lowerCAmelCase_: int = make_common_ground()
lowerCAmelCase_: Optional[Any] = blocks_blk
# hyper parameters
lowerCAmelCase_: Optional[int] = 1
lowerCAmelCase_: Dict = 1
lowerCAmelCase_: Union[str, Any] = 2_0
lowerCAmelCase_: Tuple = 3 # one consistent and two other inconsistent
# start and end destination
lowerCAmelCase_: Optional[int] = (0, 0)
lowerCAmelCase_: Optional[Any] = (n - 1, n - 1)
lowerCAmelCase_: Tuple = 1
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = {start: 0, goal: float("inf" )}
lowercase__ = {start: -1, goal: -1}
lowercase__ = []
lowercase__ = set()
for i in range(lowerCAmelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(lowerCAmelCase__ , key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase__ = []
lowercase__ = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , lowerCAmelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
lowercase__ , lowercase__ = open_list[i].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_inad.append(lowerCAmelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
lowercase__ = open_list[0].top_show()
visited.add(lowerCAmelCase__ )
expand_state(
lowerCAmelCase__ , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , )
close_list_anchor.append(lowerCAmelCase__ )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(lowerCAmelCase__ ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 714 |
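The hand-rolled `PriorityQueue` above implements put-with-update by draining the heap into a temporary list, which costs O(n log n) per update. The usual alternative is lazy deletion: push a fresh entry for the new priority and skip stale entries on pop. A sketch, independent of the multi-heuristic search code above:

import heapq
import itertools


class UpdatablePriorityQueue:
    """Min-priority queue with cheap decrease-key via lazy deletion."""

    _REMOVED = object()

    def __init__(self):
        self._heap = []
        self._entries = {}            # item -> [priority, count, item]
        self._counter = itertools.count()

    def put(self, item, priority):
        if item in self._entries:     # mark the old entry stale
            self._entries[item][-1] = self._REMOVED
        entry = [priority, next(self._counter), item]
        self._entries[item] = entry
        heapq.heappush(self._heap, entry)

    def pop(self):
        while self._heap:             # skip entries marked stale by put()
            priority, _, item = heapq.heappop(self._heap)
            if item is not self._REMOVED:
                del self._entries[item]
                return priority, item
        raise KeyError("pop from empty queue")


q = UpdatablePriorityQueue()
q.put((0, 0), 7)
q.put((1, 1), 3)
q.put((0, 0), 1)   # update: (0, 0) now outranks (1, 1)
print(q.pop())     # (1, (0, 0))
print(q.pop())     # (3, (1, 1))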
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
lowerCAmelCase_: Union[str, Any] = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoConfig.from_pretrained(*A , **A )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(*A , **A )
@add_start_docstrings(AutoModel.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModel.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*A , **A )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __a ( *A , **A ):
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*A , **A )
| 668 | 0 |
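The file above is the classic `hubconf.py` shim: it puts `src/` on `sys.path`, declares `dependencies` for `torch.hub` to verify, and re-exports thin `from_pretrained` wrappers as entrypoints. A minimal hubconf of the same shape (the repo path and entrypoint name below are illustrative, not real):

# hubconf.py -- minimal torch.hub entrypoint file (illustrative sketch)
dependencies = ["torch"]


def linear_probe(in_features: int = 8, out_features: int = 2):
    """Any top-level callable in hubconf.py becomes a torch.hub entrypoint."""
    import torch.nn as nn

    return nn.Linear(in_features, out_features)


# Consumers would then load it without importing the repo directly, e.g.:
#   model = torch.hub.load("some_user/some_repo", "linear_probe")
# or, for a local checkout containing this hubconf.py:
#   model = torch.hub.load(".", "linear_probe", source="local")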
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
UpperCAmelCase_: str = logging.get_logger(__name__)
UpperCAmelCase_: Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_: int = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_: Any = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
UpperCAmelCase_: Optional[Any] = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class a__ ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = DistilBertTokenizer
def __init__( self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase="[UNK]", _UpperCAmelCase="[SEP]", _UpperCAmelCase="[PAD]", _UpperCAmelCase="[CLS]", _UpperCAmelCase="[MASK]", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCamelCase, tokenizer_file=_UpperCamelCase, do_lower_case=_UpperCamelCase, unk_token=_UpperCamelCase, sep_token=_UpperCamelCase, pad_token=_UpperCamelCase, cls_token=_UpperCamelCase, mask_token=_UpperCamelCase, tokenize_chinese_chars=_UpperCamelCase, strip_accents=_UpperCamelCase, **_UpperCamelCase, )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase", _UpperCamelCase ) != do_lower_case
or normalizer_state.get("strip_accents", _UpperCamelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars", _UpperCamelCase ) != tokenize_chinese_chars
):
lowercase__ = getattr(_UpperCamelCase, normalizer_state.pop("type" ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**_UpperCamelCase )
lowercase__ = do_lower_case
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=None ):
'''simple docstring'''
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = None ):
'''simple docstring'''
lowercase__ = self._tokenizer.model.save(_UpperCamelCase, name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 715 |
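The three overrides above encode BERT-style sequence formatting: `[CLS] A [SEP]` (optionally followed by `B [SEP]`), plus 0/1 segment ids for the two sequences. The logic is small enough to verify with plain lists; the token ids below are made up:

from typing import List, Optional

CLS, SEP = 101, 102


def build_inputs(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    out = [CLS] + ids_a + [SEP]
    if ids_b:
        out += ids_b + [SEP]
    return out


def token_type_ids(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    first = len([CLS] + ids_a + [SEP]) * [0]   # segment 0 covers [CLS] A [SEP]
    if ids_b is None:
        return first
    return first + len(ids_b + [SEP]) * [1]    # segment 1 covers B [SEP]


a, b = [7, 8], [9]
assert build_inputs(a, b) == [101, 7, 8, 102, 9, 102]
assert token_type_ids(a, b) == [0, 0, 0, 0, 1, 1]
print("sequence-pair formatting checks out")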
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "bert-base is not a local folder and is not a valid model identifier" ):
lowercase__ = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 0 |
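The tests above wrap the model call in `jax.jit` and finish with `block_until_ready()`, so the test exercises tracing, XLA compilation, and the asynchronously dispatched execution. The same pattern on a toy function:

import jax
import jax.numpy as jnp


@jax.jit
def affine(w, x, b):
    return w @ x + b


w = jnp.ones((4, 3))
x = jnp.arange(3.0)
b = jnp.zeros(4)
out = affine(w, x, b)    # first call triggers tracing + XLA compilation
out.block_until_ready()  # block until the asynchronous computation finishes
print(out)               # [3. 3. 3. 3.]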
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
lowerCAmelCase_: Tuple = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __a ( A ):
'''simple docstring'''
lowercase__ = {}
with open(_lowerCamelCase , "r" ) as file:
for line_number, line in enumerate(_lowerCamelCase ):
lowercase__ = line.strip()
if line:
lowercase__ = line.split()
lowercase__ = line_number
lowercase__ = words[0]
lowercase__ = value
return result
def __a ( A , A , A , A , A ):
'''simple docstring'''
for attribute in key.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowercase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
lowercase__ = hf_pointer
for attribute in hf_param_name.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = shape_pointer.shape
# let's reduce dimension
lowercase__ = value[0]
else:
lowercase__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase__ = value
elif weight_type == "weight_g":
lowercase__ = value
elif weight_type == "weight_v":
lowercase__ = value
elif weight_type == "bias":
lowercase__ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
lowercase__ = getattr(_lowerCamelCase , _lowerCamelCase )
lowercase__ = value
else:
lowercase__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_lowerCamelCase ):
lowercase__ = PARAM_MAPPING[full_name.split("." )[-1]]
lowercase__ = 'param'
if weight_type is not None and weight_type != "param":
lowercase__ = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
lowercase__ = '.'.join([key, hf_param_name] )
else:
lowercase__ = key
lowercase__ = value if 'lm_head' in full_key else value[0]
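# fairseq adapter parameter names and their HF counterparts (PARAM_MAPPING)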
lowerCAmelCase_: List[Any] = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __a ( A , A , A=None , A=None ):
'''simple docstring'''
lowercase__ = False
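    # try every fairseq -> HF key mapping; "*" is a placeholder for the transformer layer index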
for key, mapped_key in MAPPING.items():
lowercase__ = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowercase__ = True
if "*" in mapped_key:
lowercase__ = name.split(_lowerCamelCase )[0].split("." )[-2]
lowercase__ = mapped_key.replace("*" , _lowerCamelCase )
if "weight_g" in name:
lowercase__ = 'weight_g'
elif "weight_v" in name:
lowercase__ = 'weight_v'
elif "bias" in name:
lowercase__ = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase__ = 'weight'
else:
lowercase__ = None
if hf_dict is not None:
rename_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return is_used
return is_used
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = []
lowercase__ = fairseq_model.state_dict()
lowercase__ = hf_model.wavaveca.feature_extractor
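    # conv feature-extractor tensors are loaded separately from the transformer tensors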
for name, value in fairseq_dict.items():
lowercase__ = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
lowercase__ = True
else:
lowercase__ = load_wavaveca_layer(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __a ( A , A , A , A , A ):
'''simple docstring'''
lowercase__ = full_name.split("conv_layers." )[-1]
lowercase__ = name.split("." )
lowercase__ = int(items[0] )
lowercase__ = int(items[1] )
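    # type_id 0 -> conv weight/bias, type_id 2 -> layer-norm weight/bias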
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase__ = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def __a ( A , A , A=None , A=None , A=True , A=False ):
'''simple docstring'''
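    # three conversion modes: sequence-classification head, fine-tuned CTC head, or pretraining checkpoint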
if config_path is not None:
lowercase__ = WavaVecaConfig.from_pretrained(_lowerCamelCase )
else:
lowercase__ = WavaVecaConfig()
if is_seq_class:
lowercase__ = read_txt_into_dict(_lowerCamelCase )
lowercase__ = idalabel
lowercase__ = WavaVecaForSequenceClassification(_lowerCamelCase )
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
feature_extractor.save_pretrained(_lowerCamelCase )
elif is_finetuned:
if dict_path:
lowercase__ = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase__ = target_dict.pad_index
lowercase__ = target_dict.bos_index
lowercase__ = target_dict.eos_index
lowercase__ = len(target_dict.symbols )
lowercase__ = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
lowercase__ = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase__ = 0
lowercase__ = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
lowercase__ = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
        lowercase__ = config.feat_extract_norm == "layer"
lowercase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
lowercase__ = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
lowercase__ = WavaVecaForCTC(_lowerCamelCase )
else:
lowercase__ = WavaVecaForPreTraining(_lowerCamelCase )
if is_finetuned or is_seq_class:
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
lowercase__ = argparse.Namespace(task="audio_pretraining" )
lowercase__ = fairseq.tasks.setup_task(_lowerCamelCase )
lowercase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
lowercase__ = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
lowerCAmelCase_: int = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
lowerCAmelCase_: List[Any] = parser.parse_args()
lowerCAmelCase_: List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 716 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( _a ):
snake_case_ = "data2vec-vision"
def __init__( self, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.0, _UpperCAmelCase=0.0, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=224, _UpperCAmelCase=16, _UpperCAmelCase=3, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=True, _UpperCAmelCase=[3, 5, 7, 11], _UpperCAmelCase=[1, 2, 3, 6], _UpperCAmelCase=True, _UpperCAmelCase=0.4, _UpperCAmelCase=256, _UpperCAmelCase=1, _UpperCAmelCase=False, _UpperCAmelCase=255, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(**_UpperCAmelCase )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = use_mask_token
lowercase__ = use_absolute_position_embeddings
lowercase__ = use_relative_position_bias
lowercase__ = use_shared_relative_position_bias
lowercase__ = layer_scale_init_value
lowercase__ = drop_path_rate
lowercase__ = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__ = out_indices
lowercase__ = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__ = use_auxiliary_head
lowercase__ = auxiliary_loss_weight
lowercase__ = auxiliary_channels
lowercase__ = auxiliary_num_convs
lowercase__ = auxiliary_concat_input
lowercase__ = semantic_loss_ignore_index
class a__ ( _a ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
lowercase__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowercase__ = "A painting of a squirrel eating a burger"
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=__lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type="np" )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_euler" )
lowercase__ = "A painting of a squirrel eating a burger"
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe([prompt], generator=__lowerCamelCase, guidance_scale=9.0, num_inference_steps=20, output_type="np" )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
lowercase__ = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
lowercase__ = "A painting of a squirrel eating a burger"
lowercase__ = torch.manual_seed(0 )
lowercase__ = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=7.5, num_inference_steps=15, output_type="np", use_karras_sigmas=__lowerCamelCase, )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array(
[0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _UpperCAmelCase ):
snake_case_ = (DDPMParallelScheduler,)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**lowerCamelCase_ )
return config
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCamelCase_, beta_end=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCamelCase_, prediction_type=lowerCamelCase_, sample_max_value=lowerCamelCase_, )
def snake_case__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = self.dummy_sample_deter + 0.1
lowercase__ = self.dummy_sample_deter - 0.1
lowercase__ = samplea.shape[0]
lowercase__ = torch.stack([samplea, samplea, samplea], dim=0 )
lowercase__ = torch.arange(lowerCamelCase_ )[0:3, None].repeat(1, lowerCamelCase_ )
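        # evaluate three samples at three different timesteps in one flattened, batched call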
lowercase__ = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
lowercase__ = scheduler.batch_step_no_noise(lowerCamelCase_, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ) )
lowercase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
lowercase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1E-2
assert abs(result_mean.item() - 0.5_005 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
lowercase__ = model(lowerCamelCase_, lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, generator=lowerCamelCase_ ).prev_sample
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
lowercase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(prediction_type="v_prediction" )
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = len(lowerCamelCase_ )
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
lowercase__ = torch.manual_seed(0 )
for t in reversed(range(lowerCamelCase_ ) ):
# 1. predict noise residual
lowercase__ = model(lowerCamelCase_, lowerCamelCase_ )
# 2. predict previous mean of sample x_t-1
lowercase__ = scheduler.step(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, generator=lowerCamelCase_ ).prev_sample
lowercase__ = pred_prev_sample
lowercase__ = torch.sum(torch.abs(lowerCamelCase_ ) )
lowercase__ = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
lowercase__ = scheduler.timesteps
for i, timestep in enumerate(lowerCamelCase_ ):
if i == len(lowerCamelCase_ ) - 1:
lowercase__ = -1
else:
lowercase__ = timesteps[i + 1]
lowercase__ = scheduler.previous_timestep(lowerCamelCase_ )
lowercase__ = prev_t.item()
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = [100, 87, 50, 51, 0]
with self.assertRaises(lowerCamelCase_, msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = [100, 87, 50, 1, 0]
lowercase__ = len(lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=lowerCamelCase_, timesteps=lowerCamelCase_ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**lowerCamelCase_ )
lowercase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            lowerCamelCase_, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
scheduler.set_timesteps(timesteps=lowerCamelCase_ )
| 718 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ ( _UpperCAmelCase ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase=13, _UpperCAmelCase=7, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=True, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=2, _UpperCAmelCase=99, _UpperCAmelCase=0, _UpperCAmelCase=32, _UpperCAmelCase=5, _UpperCAmelCase=4, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=12, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=3, _UpperCAmelCase=4, _UpperCAmelCase="last", _UpperCAmelCase=None, _UpperCAmelCase=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_lengths
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = gelu_activation
lowercase__ = sinusoidal_embeddings
lowercase__ = causal
lowercase__ = asm
lowercase__ = n_langs
lowercase__ = vocab_size
lowercase__ = n_special
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = summary_type
lowercase__ = use_proj
lowercase__ = scope
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_input_lengths:
lowercase__ = (
ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.n_langs )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], 2 ).float()
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case__ ( self ):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = FlaubertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase, lengths=__UpperCamelCase, langs=__UpperCamelCase )
lowercase__ = model(__UpperCamelCase, langs=__UpperCamelCase )
lowercase__ = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = FlaubertWithLMHeadModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase, token_type_ids=__UpperCamelCase, labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = FlaubertForQuestionAnsweringSimple(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase )
lowercase__ = model(__UpperCamelCase, start_positions=__UpperCamelCase, end_positions=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = FlaubertForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase )
lowercase__ = model(
__UpperCamelCase, start_positions=__UpperCamelCase, end_positions=__UpperCamelCase, cls_index=__UpperCamelCase, is_impossible=__UpperCamelCase, p_mask=__UpperCamelCase, )
lowercase__ = model(
__UpperCamelCase, start_positions=__UpperCamelCase, end_positions=__UpperCamelCase, cls_index=__UpperCamelCase, is_impossible=__UpperCamelCase, )
        (lowercase__,) = result_with_labels.to_tuple()
        lowercase__ = model(__UpperCamelCase, start_positions=__UpperCamelCase, end_positions=__UpperCamelCase )
        (lowercase__,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, () )
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = FlaubertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase )
lowercase__ = model(__UpperCamelCase, labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = FlaubertForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = model(__UpperCamelCase, attention_mask=__UpperCamelCase, labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = FlaubertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
lowercase__ = model(
__UpperCamelCase, attention_mask=__UpperCamelCase, token_type_ids=__UpperCamelCase, labels=__UpperCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
        (
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
            lowercase__,
        ) = config_and_inputs
lowercase__ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class a__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
snake_case_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(__UpperCamelCase, __UpperCamelCase, return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
lowercase__ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=__UpperCamelCase )
lowercase__ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=__UpperCamelCase )
return inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaubertModelTester(self )
lowercase__ = ConfigTester(self, config_class=__UpperCamelCase, emb_dim=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCamelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCamelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FlaubertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
@require_torch_gpu
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ = True
lowercase__ = model_class(config=__UpperCamelCase )
lowercase__ = self._prepare_for_class(__UpperCamelCase, __UpperCamelCase )
lowercase__ = torch.jit.trace(
__UpperCamelCase, (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCamelCase, os.path.join(__UpperCamelCase, "traced_model.pt" ) )
lowercase__ = torch.jit.load(os.path.join(__UpperCamelCase, "traced_model.pt" ), map_location=__UpperCamelCase )
loaded(inputs_dict["input_ids"].to(__UpperCamelCase ), inputs_dict["attention_mask"].to(__UpperCamelCase ) )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
lowercase__ = model(__UpperCamelCase )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, __UpperCamelCase )
lowercase__ = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3], __UpperCamelCase, atol=1E-4 ) )
| 719 |
"""simple docstring"""
from __future__ import annotations
def __a ( A , A ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
lowercase__ = number_of_bytes // partitions
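    # equal-sized partitions; the last partition absorbs any remainder bytes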
lowercase__ = []
for i in range(A ):
lowercase__ = i * bytes_per_partition + 1
lowercase__ = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: Dict = logging.get_logger(__name__)
lowerCAmelCase_: List[str] = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class a__ ( _a ):
snake_case_ = """ibert"""
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=1, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase="absolute", _UpperCAmelCase=False, _UpperCAmelCase="none", **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case_, bos_token_id=snake_case_, eos_token_id=snake_case_, **snake_case_ )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class a__ ( _a ):
@property
def snake_case__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 720 |
"""simple docstring"""
from collections import deque
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = process_name # process name
lowercase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
lowercase__ = arrival_time
lowercase__ = burst_time # remaining burst time
lowercase__ = 0 # total time of the process wait in ready queue
lowercase__ = 0 # time from arrival time to completion time
class a__ :
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowercase__ = time_slices
# unfinished process is in this ready_queue
lowercase__ = queue
# current time
lowercase__ = current_time
# finished process is in this sequence queue
lowercase__ = deque()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
for i in range(len(_UpperCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return [q.burst_time for q in queue]
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of finished process
while len(_UpperCAmelCase ) != 0:
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_UpperCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowercase__ = 0
# set the process's turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# set the completion time
lowercase__ = self.current_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_UpperCAmelCase ) ):
lowercase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_UpperCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowercase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_UpperCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowercase__ = 0
# set the finish time
lowercase__ = self.current_time
# update the process' turnaround time because it is finished
lowercase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_UpperCAmelCase )
self.finish_queue.extend(_UpperCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def snake_case__ ( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
lowercase__ , lowercase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 668 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( SCREAMING_SNAKE_CASE__ ):
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "Pix2StructImageProcessor"
snake_case_ = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = False
super().__init__(_lowercase, _lowercase )
def __call__( self, _UpperCAmelCase=None, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = False, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = 2048, _UpperCAmelCase = 0, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = True, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer
lowercase__ = self.tokenizer(
text=_lowercase, add_special_tokens=_lowercase, padding=_lowercase, truncation=_lowercase, max_length=_lowercase, stride=_lowercase, pad_to_multiple_of=_lowercase, return_attention_mask=_lowercase, return_overflowing_tokens=_lowercase, return_special_tokens_mask=_lowercase, return_offsets_mapping=_lowercase, return_token_type_ids=_lowercase, return_length=_lowercase, verbose=_lowercase, return_tensors=_lowercase, **_lowercase, )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase__ = self.image_processor(
_lowercase, return_tensors=_lowercase, max_patches=_lowercase, **_lowercase )
else:
# add pixel_values and bbox
lowercase__ = self.image_processor(
_lowercase, return_tensors=_lowercase, max_patches=_lowercase, header_text=_lowercase, **_lowercase )
if text is not None and not self.image_processor.is_vqa:
lowercase__ = self.tokenizer(
text=_lowercase, add_special_tokens=_lowercase, padding=_lowercase, truncation=_lowercase, max_length=_lowercase, stride=_lowercase, pad_to_multiple_of=_lowercase, return_attention_mask=_lowercase, return_overflowing_tokens=_lowercase, return_special_tokens_mask=_lowercase, return_offsets_mapping=_lowercase, return_token_type_ids=_lowercase, return_length=_lowercase, verbose=_lowercase, return_tensors=_lowercase, **_lowercase, )
if "attention_mask" in text_encoding:
lowercase__ = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase__ = text_encoding.pop("input_ids" )
else:
lowercase__ = None
if text_encoding is not None:
encoding_image_processor.update(_lowercase )
return encoding_image_processor
def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase, **_lowercase )
def snake_case__ ( self, *_UpperCAmelCase, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase, **_lowercase )
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 721 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( _a , unittest.TestCase ):
snake_case_ = ByTaTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5 ):
'''simple docstring'''
lowercase__ = []
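        # collect token ids whose single-token decodes are plain ASCII words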
for i in range(len(_UpperCAmelCase ) ):
try:
lowercase__ = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase__ = list(filter(lambda _UpperCAmelCase : re.match(R"^[ a-zA-Z]+$", t[1] ), _UpperCAmelCase ) )
lowercase__ = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase ), _UpperCAmelCase ) )
if max_length is not None and len(_UpperCAmelCase ) > max_length:
lowercase__ = toks[:max_length]
if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
while len(_UpperCAmelCase ) < min_length:
lowercase__ = toks + toks
# toks_str = [t[1] for t in toks]
lowercase__ = [t[0] for t in toks]
# Ensure consistency
lowercase__ = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase )
if " " not in output_txt and len(_UpperCAmelCase ) > 1:
lowercase__ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase )
)
if with_prefix_space:
lowercase__ = " " + output_txt
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
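# Editorial sketch (not part of the original test suite): ByT5 has no learned
# vocabulary; every UTF-8 byte maps to `byte + 3`, leaving room for pad=0,
# </s>=1 and unk=2. That offset explains the expected ids above, e.g. "U"
# (byte value 85) encodes to 88. The helper name below is an assumption.
def _byte_level_ids(text ):
    return [b + 3 for b in text.encode("utf-8" )] + [1]  # trailing 1 is </s>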
| 668 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision ):
    '''simple docstring'''
    if not isinstance(precision, int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 42_68_80 * Decimal(1_00_05 ).sqrt()
    exponential_term = 1
    linear_term = 13_59_14_09
    partial_sum = Decimal(linear_term )
    for k in range(1, num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 5_45_14_01_34
        exponential_term *= -26_25_37_41_26_40_76_80_00
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    n = 5_0
    print(F'The first {n} digits of pi is: {pi(n)}')
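    # Illustrative sanity check (an editorial addition): the leading digits
    # produced by pi() must agree with the math module's constant.
    import math
    assert pi(30 ).startswith(str(math.pi )[:15] )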
| 700 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
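# Illustrative usage (an editorial addition; the tiny checkpoint is the same
# one exercised in the torch test above):
#
#   classifier = pipeline(
#       "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification" )
#   print(classifier(video_file_path, top_k=2 ) )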
| 668 | 0 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array, output_array ):
        '''simple docstring'''
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )

    def feedforward(self ):
        '''simple docstring'''
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self ):
        '''simple docstring'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T, 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ), )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T, numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ), self.second_hidden_layer_and_output_layer_weights.T, )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ), )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T, numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ), self.second_hidden_layer_and_output_layer_weights.T, )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ), self.first_hidden_layer_and_second_hidden_layer_weights.T, )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ), )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss ):
        '''simple docstring'''
        for iteration in range(1, iterations + 1 ):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F'''Iteration {iteration} Loss: {loss}''' )

    def predict(self, input_arr ):
        '''simple docstring'''
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer, self.first_hidden_layer_and_second_hidden_layer_weights, ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer, self.second_hidden_layer_and_output_layer_weights, ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )


def sigmoid(value ):
    '''simple docstring'''
    return 1 / (1 + numpy.exp(-value ))


def sigmoid_derivative(value ):
    '''simple docstring'''
    return (value) * (1 - (value))


def example():
    '''simple docstring'''
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    test_output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=test_output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=test_output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )


if __name__ == "__main__":
    example()
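# Editorial note (not in the original): the weight updates above are plain
# gradient descent on the squared error. For the logistic activation
# s(x) = 1 / (1 + exp(-x)), the derivative is s'(x) = s(x) * (1 - s(x)), which
# is why sigmoid_derivative() is applied to the stored *activations* directly.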
| 701 |
"""simple docstring"""
import itertools
import math
def is_prime(number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth = 1_00_01 ):
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )


if __name__ == "__main__":
    print(F'{solution() = }')
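# Editorial sketch (an illustrative alternative, not in the original): for
# large `nth` a sieve of Eratosthenes beats per-number trial division. The
# helper name and the 200_000 bound (the 10001st prime is 104_743) are
# assumptions for demonstration only.
def _nth_prime_sieve(nth , limit = 200_000 ):
    sieve = [True] * limit
    sieve[0] = sieve[1] = False
    for i in range(2 , int(limit ** 0.5 ) + 1 ):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i] )
    return [i for i, is_p in enumerate(sieve ) if is_p][nth - 1]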
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Tuple = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class a__ ( _a ):
snake_case_ = "gpt_neox"
    def __init__( self, vocab_size=5_0432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=2_4576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=1_0000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1E-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )

    def _rope_scaling_validation( self ):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None )
        rope_scaling_factor = self.rope_scaling.get("factor", None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
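# Illustrative usage (an editorial addition): the validator above accepts
# exactly two fields, a `type` in {"linear", "dynamic"} and a float `factor`
# greater than 1, e.g.
#
#   cfg = a__(rope_scaling={"type": "linear", "factor": 2.0} )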
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class a__ ( _a ):
    def __init__( self, path_or_paths, split = None, features = None, cache_dir = None, keep_in_memory = False, streaming = False, num_proc = None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.path_or_paths = path_or_paths if isinstance(path_or_paths, dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=self.path_or_paths, features=features, **kwargs, )

    def read( self ):
        '''simple docstring'''
        # Build streaming dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
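# Illustrative usage (an editorial addition): this reader backs the packaged
# "text" loader, which is normally reached via
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"} )["train"]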
| 668 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCAmelCase_: int = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        '''simple docstring'''
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs ):
        '''simple docstring'''
        raise NotImplementedError

    def default_hp_space(self, trial ):
        '''simple docstring'''
        raise NotImplementedError

    def ensure_available(self ):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls ):
        '''simple docstring'''
        return F'''`pip install {cls.pip_package or cls.name}`'''


class OptunaBackend(HyperParamSearchBackendBase ):
    name = "optuna"

    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs ):
        '''simple docstring'''
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs )

    def default_hp_space(self, trial ):
        '''simple docstring'''
        return default_hp_space_optuna(trial )


class RayTuneBackend(HyperParamSearchBackendBase ):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs ):
        '''simple docstring'''
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs )

    def default_hp_space(self, trial ):
        '''simple docstring'''
        return default_hp_space_ray(trial )


class SigOptBackend(HyperParamSearchBackendBase ):
    name = "sigopt"

    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs ):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs )

    def default_hp_space(self, trial ):
        '''simple docstring'''
        return default_hp_space_sigopt(trial )


class WandbBackend(HyperParamSearchBackendBase ):
    name = "wandb"

    @staticmethod
    def is_available():
        '''simple docstring'''
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs ):
        '''simple docstring'''
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs )

    def default_hp_space(self, trial ):
        '''simple docstring'''
        return default_hp_space_wandb(trial )
lowerCAmelCase_: Union[str, Any] = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __a ( ):
'''simple docstring'''
lowercase__ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case_ ) > 0:
lowercase__ = available_backends[0].name
if len(snake_case_ ) > 1:
logger.info(
f'''{len(snake_case_ )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
f''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
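# Illustrative usage (an editorial addition, assuming an already configured
# Trainer): these backends are resolved by name inside
#
#   best_run = trainer.hyperparameter_search(backend="optuna", n_trials=20,
#                                            direction="minimize" )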
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
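# Illustrative launch command (an editorial addition; the flags match the
# argument parser defined above, the script name is a placeholder):
#
#   accelerate launch this_script.py --model_name_or_path bert-base-cased \
#       --num_epochs 3 --output_dir ./results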
| 668 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class a__ ( lowercase__ ):
snake_case_ = "ibert"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=1, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase="absolute", _UpperCAmelCase=False, _UpperCAmelCase="none", **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = quant_mode
lowercase__ = force_dequant
class a__ ( lowercase__ ):
@property
def snake_case__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
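# Illustrative usage (an editorial addition): `quant_mode` switches I-BERT to
# its integer-only arithmetic path, and `force_dequant` can opt individual ops
# back out of it:
#
#   cfg = a__(quant_mode=True, force_dequant="none" )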
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
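# Editorial note (not in the original): IPNDM is a fourth-order multistep
# scheduler, which is why the tests above seed exactly four past residuals.
# A bare-bones sampling loop, assuming a `model(sample, t)` callable, mirrors
# the full_loop helper:
#
#   scheduler.set_timesteps(50 )
#   for t in scheduler.timesteps:
#       residual = model(sample, t )
#       sample = scheduler.step(residual, t, sample ).prev_sample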
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Dict = logging.get_logger(__name__)
lowerCAmelCase_: int = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class a__ ( __SCREAMING_SNAKE_CASE ):
snake_case_ = "openai-gpt"
snake_case_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=4_0478, _UpperCAmelCase=512, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1E-5, _UpperCAmelCase=0.02, _UpperCAmelCase="cls_index", _UpperCAmelCase=True, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase=0.1, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = n_positions
lowercase__ = n_embd
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = afn
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = attn_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = summary_type
lowercase__ = summary_use_proj
lowercase__ = summary_activation
lowercase__ = summary_first_dropout
lowercase__ = summary_proj_to_labels
super().__init__(**_a )
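# Editorial note (not in the original): the attribute_map above aliases the
# generic config names onto GPT-1's historical ones, so for instance:
#
#   cfg = a__()
#   assert cfg.hidden_size == cfg.n_embd == 768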
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
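# Editorial note (not in the original): MGP-STR tokenizes purely per character
# against the vocabulary written in setUp(), so a lookup sketch is simply:
#
#   vocab = tokenizer.get_vocab()
#   ids = [vocab[char] for char in "tester" if char in vocab]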
| 668 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_highpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_bandpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_allpass(frequency , samplerate , q_factor = 1 / sqrt(2 ) ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt


def make_peak(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_lowshelf(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_highshelf(frequency , samplerate , gain_db , q_factor = 1 / sqrt(2 ) , ):
    '''simple docstring'''
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
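# Illustrative usage (an editorial addition; IIRFilter.process() applies the
# stored difference equation one sample at a time in the upstream module):
#
#   filt = make_lowpass(1_000 , 48_000 )   # low-pass at 1 kHz for 48 kHz audio
#   filtered = [filt.process(sample ) for sample in samples]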
| 706 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
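# Illustrative usage (an editorial addition; checkpoint name and call pattern
# assumed from the Hub documentation for this pipeline):
#
#   pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion" )
#   processor = MidiProcessor()
#   output = pipe(processor("beethoven.mid" ) )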
| 668 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( _a , unittest.TestCase ):
snake_case_ = CanineTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case__ ( self ):
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.tokenizer_class.from_pretrained(self.tmpdirname, **__lowerCAmelCase )
lowercase__ = 1024
return tokenizer
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.canine_tokenizer
lowercase__ = ["Life is like a box of chocolates.", "You never know what you\'re gonna get."]
# fmt: off
lowercase__ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowercase__ = tokenizer(__lowerCAmelCase, padding=__lowerCAmelCase, return_tensors="pt" )
self.assertIsInstance(__lowerCAmelCase, __lowerCAmelCase )
lowercase__ = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCAmelCase, __lowerCAmelCase )
self.assertEqual((2, 39), batch.input_ids.shape )
self.assertEqual((2, 39), batch.attention_mask.shape )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.canine_tokenizer
lowercase__ = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
lowercase__ = tokenizer(__lowerCAmelCase, padding=__lowerCAmelCase, return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids", __lowerCAmelCase )
self.assertIn("attention_mask", __lowerCAmelCase )
self.assertIn("token_type_ids", __lowerCAmelCase )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.canine_tokenizer
lowercase__ = [
"What\'s the weater?",
"It\'s about 25 degrees.",
]
lowercase__ = tokenizer(
text_target=__lowerCAmelCase, max_length=32, padding="max_length", truncation=__lowerCAmelCase, return_tensors="pt" )
self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
lowercase__ = after_tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase, __lowerCAmelCase )
shutil.rmtree(__lowerCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowercase__ = chr(0Xe007 )
additional_special_tokens.append(__lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase )
lowercase__ = after_tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase, __lowerCAmelCase )
self.assertIn(__lowerCAmelCase, after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(__lowerCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(__lowerCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_clean_sequence(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowercase__ = 0Xe005
lowercase__ = chr(__lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ), 1 )
lowercase__ = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=__lowerCAmelCase )
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase, input_encoded + special_token_id )
lowercase__ = tokenizer.decode(__lowerCAmelCase, skip_special_tokens=__lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = chr(0Xe005 )
lowercase__ = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=__lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
lowercase__ = tokenizer.tokenize(__lowerCAmelCase )
lowercase__ = tokenizer.tokenize(__lowerCAmelCase )
self.assertEqual(len(__lowerCAmelCase ), 1 )
self.assertEqual(len(__lowerCAmelCase ), 1 )
self.assertEqual(token_a[0], __lowerCAmelCase )
self.assertEqual(token_a[0], __lowerCAmelCase )
@require_tokenizers
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
lowercase__ = 0Xe006
lowercase__ = chr(__lowerCAmelCase )
lowercase__ = AddedToken(__lowerCAmelCase, lstrip=__lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(__lowerCAmelCase )
tokenizer.from_pretrained(__lowerCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(__lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(__lowerCAmelCase )
# a special token for Canine can be defined as follows:
lowercase__ = 0Xe006
lowercase__ = chr(__lowerCAmelCase )
lowercase__ = [new_token_a]
lowercase__ = [new_token_a]
with open(os.path.join(__lowerCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(__lowerCAmelCase, __lowerCAmelCase )
with open(os.path.join(__lowerCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(__lowerCAmelCase, __lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(__lowerCAmelCase, extra_ids=0 )
self.assertIn(__lowerCAmelCase, tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ), )
lowercase__ = 0Xe007
lowercase__ = chr(__lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = [AddedToken(__lowerCAmelCase, lstrip=__lowerCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
__lowerCAmelCase, additional_special_tokens=__lowerCAmelCase, extra_ids=0 )
self.assertIn(__lowerCAmelCase, tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "hello world"
if self.space_between_special_tokens:
lowercase__ = "[CLS] hello world [SEP]"
else:
lowercase__ = input
lowercase__ = tokenizer.encode(__lowerCAmelCase, add_special_tokens=__lowerCAmelCase )
lowercase__ = tokenizer.decode(__lowerCAmelCase, spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(__lowerCAmelCase, [output, output.lower()] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = "a"
lowercase__ = ord(__lowerCAmelCase )
for attr in attributes_list:
setattr(__lowerCAmelCase, attr + "_id", __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase, __lowerCAmelCase ), __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase, attr + "_id" ), __lowerCAmelCase )
setattr(__lowerCAmelCase, attr + "_id", __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase, __lowerCAmelCase ), __lowerCAmelCase )
self.assertEqual(getattr(__lowerCAmelCase, attr + "_id" ), __lowerCAmelCase )
setattr(__lowerCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(__lowerCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(__lowerCAmelCase, "additional_special_tokens_ids" ), [] )
lowercase__ = 0Xe006
lowercase__ = chr(__lowerCAmelCase )
setattr(__lowerCAmelCase, "additional_special_tokens_ids", [additional_special_token_id] )
self.assertListEqual(getattr(__lowerCAmelCase, "additional_special_tokens" ), [additional_special_token] )
self.assertListEqual(getattr(__lowerCAmelCase, "additional_special_tokens_ids" ), [additional_special_token_id] )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
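# --- Illustrative sketch (not part of the dataset row above) ---
# The row above round-trips custom special tokens through save_pretrained /
# from_pretrained. Because of the dataset's name rewriting it is not runnable
# as printed; below is a minimal runnable version of the same pattern. It
# assumes network access and uses "bert-base-uncased" purely as a stand-in
# checkpoint.
import tempfile

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenizer.add_special_tokens({"additional_special_tokens": ["<my_token>"]})
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir)  # writes special_tokens_map.json etc.
    reloaded = AutoTokenizer.from_pretrained(tmp_dir)
# The custom token survives the round trip and is encoded as a single id.
assert "<my_token>" in reloaded.additional_special_tokens
assert len(reloaded.encode("<my_token>", add_special_tokens=False)) == 1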
| 707 |
"""simple docstring"""
from typing import Any
import numpy as np
def __a ( A ):
'''simple docstring'''
return np.array_equal(A , matrix.conjugate().T )
def __a ( A , A ):
'''simple docstring'''
lowercase__ = v.conjugate().T
lowercase__ = v_star.dot(A )
assert isinstance(A , np.ndarray )
return (v_star_dot.dot(A )) / (v_star.dot(A ))
def __a ( ):
'''simple docstring'''
lowercase__ = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
lowercase__ = np.array([[1], [2], [3]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
print(rayleigh_quotient(A , A ) )
lowercase__ = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A ), f'''{a} is not hermitian.'''
assert rayleigh_quotient(A , A ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
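# --- Illustrative sketch (not part of the dataset row above) ---
# The row above computes the Rayleigh quotient v* A v / v* v of a Hermitian
# matrix. A self-contained NumPy version, written without the dataset's name
# rewriting so it runs as-is:
import numpy as np

def rayleigh_quotient_sketch(a: np.ndarray, v: np.ndarray) -> float:
    v_star = v.conjugate().T  # conjugate transpose of the vector
    return float((v_star @ a @ v) / (v_star @ v))

a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
v = np.array([[1], [2], [3]])
assert np.array_equal(a, a.conjugate().T)  # Hermitian (here: real symmetric)
assert rayleigh_quotient_sketch(a, v) == 3.0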
| 668 | 0 |
def __a ( A ):
'''simple docstring'''
lowercase__ = len(UpperCAmelCase__ )
for _ in range(UpperCAmelCase__ ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
lowercase__ = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = list(range(1_0, 0, -1))
print(F'Original: {arr}. Sorted: {odd_even_transposition(arr)}')
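# --- Illustrative sketch (not part of the dataset row above) ---
# Odd-even transposition sort alternates compare-and-swap passes over even and
# odd index pairs; n passes suffice for n elements. Runnable version of the
# algorithm in the row above:
def odd_even_transposition_sketch(arr: list) -> list:
    n = len(arr)
    for pass_idx in range(n):
        # even passes compare (0,1), (2,3), ...; odd passes compare (1,2), (3,4), ...
        for i in range(pass_idx % 2, n - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_sketch(list(range(10, 0, -1))) == list(range(1, 11))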
| 708 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class a__ ( _a , unittest.TestCase ):
snake_case_ = PriorTransformer
snake_case_ = "hidden_states"
@property
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = 4
lowercase__ = 8
lowercase__ = 7
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
@property
def snake_case__ ( self ):
'''simple docstring'''
return (4, 8)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
lowercase__ = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(_UpperCAmelCase )
lowercase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ , lowercase__ = self.prepare_init_args_and_inputs_for_common()
lowercase__ = self.model_class(**_UpperCAmelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
def snake_case__ ( self, _UpperCAmelCase=1, _UpperCAmelCase=768, _UpperCAmelCase=77, _UpperCAmelCase=0 ):
'''simple docstring'''
torch.manual_seed(_UpperCAmelCase )
lowercase__ = batch_size
lowercase__ = embedding_dim
lowercase__ = num_embeddings
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, embedding_dim) ).to(_UpperCAmelCase )
lowercase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(_UpperCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(_UpperCAmelCase )
lowercase__ = self.get_dummy_seed_input(seed=_UpperCAmelCase )
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
lowercase__ = sample[0, :8].flatten().cpu()
print(_UpperCAmelCase )
lowercase__ = torch.tensor(_UpperCAmelCase )
assert torch_all_close(_UpperCAmelCase, _UpperCAmelCase, atol=1E-3 )
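# --- Illustrative sketch (not part of the dataset row above) ---
# The row above drives diffusers' PriorTransformer with dummy tensors. A
# minimal forward pass using the same tiny config as the test; this assumes
# the installed diffusers version exposes the same constructor arguments:
import torch
from diffusers import PriorTransformer

model = PriorTransformer(
    num_attention_heads=2, attention_head_dim=4, num_layers=2,
    embedding_dim=8, num_embeddings=7, additional_embeddings=4,
)
batch, dim, n_emb = 4, 8, 7
out = model(
    hidden_states=torch.randn(batch, dim),
    timestep=2,
    proj_embedding=torch.randn(batch, dim),
    encoder_hidden_states=torch.randn(batch, n_emb, dim),
)[0]
assert out.shape == (batch, dim)  # the prior predicts one embedding per sample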
| 668 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_: int = get_tests_dir("fixtures/test_sentencepiece.model")
lowerCAmelCase_: int = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
lowerCAmelCase_: Optional[int] = '''>>zh<<'''
lowerCAmelCase_: Any = '''Helsinki-NLP/'''
if is_torch_available():
lowerCAmelCase_: Optional[Any] = '''pt'''
elif is_tf_available():
lowerCAmelCase_: Any = '''tf'''
else:
lowerCAmelCase_: int = '''jax'''
@require_sentencepiece
class a__ ( a__ , unittest.TestCase ):
snake_case_ = MarianTokenizer
snake_case_ = False
snake_case_ = True
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
lowercase__ = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowercase__ = dict(zip(lowerCAmelCase__, range(len(lowerCAmelCase__ ) ) ) )
lowercase__ = Path(self.tmpdirname )
save_json(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(lowerCAmelCase__, save_dir / VOCAB_FILES_NAMES["target_spm"] )
lowercase__ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__ )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "</s>"
lowercase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ), lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ), lowerCAmelCase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "</s>" )
self.assertEqual(vocab_keys[1], "<unk>" )
self.assertEqual(vocab_keys[-1], "<pad>" )
self.assertEqual(len(lowerCAmelCase__ ), 9 )
def snake_case__ ( self ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 9 )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
lowercase__ = en_de_tokenizer(["I am a small frog"], return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__ )
lowercase__ = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowerCAmelCase__, batch.input_ids[0] )
lowercase__ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase__ )
lowercase__ = [x.name for x in Path(lowerCAmelCase__ ).glob("*" )]
self.assertIn("source.spm", lowerCAmelCase__ )
MarianTokenizer.from_pretrained(lowerCAmelCase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = tok(
["I am a small frog" * 1000, "I am a small frog"], padding=lowerCAmelCase__, truncation=lowerCAmelCase__, return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape, (2, 512) )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = tok(["I am a tiny frog", "I am a small frog"], padding=lowerCAmelCase__, return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__ )
self.assertEqual(batch_smaller.input_ids.shape, (2, 10) )
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True}, )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
lowercase__ = "Tämä on testi"
lowercase__ = "This is a test"
lowercase__ = [76, 7, 2047, 2]
lowercase__ = [69, 12, 11, 940, 2]
lowercase__ = tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
lowercase__ = tokenizer(text_target=lowerCAmelCase__ ).input_ids
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__ )
lowercase__ = tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__, lowerCAmelCase__ )
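# --- Illustrative sketch (not part of the dataset row above) ---
# MarianTokenizer keeps separate source/target SentencePiece models; target
# text is tokenized through the `text_target` argument, as the test above
# exercises. Runnable sketch (assumes sentencepiece is installed and network
# access to the Helsinki-NLP checkpoint referenced in the row):
from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(["I am a small frog"],
            text_target=["Ich bin ein kleiner Frosch"],
            return_tensors="pt")
# `input_ids` come from the source vocab, `labels` from the target vocab.
print(batch["input_ids"].shape, batch["labels"].shape)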
| 709 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(A )
lowercase__ = "".join(bin(A )[2:].zfill(8 ) for byte in data )
lowercase__ = len(A ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b"=" * ((6 - len(A ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(A ) % 6)
else:
lowercase__ = b""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(A ) , 6 ) ).encode()
+ padding
)
def __a ( A ):
'''simple docstring'''
if not isinstance(A , A ) and not isinstance(A , A ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(A )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(A , A ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(A ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(A ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(A ) , 8 )
]
return bytes(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
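# --- Illustrative sketch (not part of the dataset row above) ---
# The row above hand-rolls Base64; because of the dataset's name rewriting it
# is not runnable as printed. A compact, correct version of the same bit-level
# scheme, cross-checked against the standard library:
import base64

B64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"

def b64encode_sketch(data: bytes) -> bytes:
    bits = "".join(bin(byte)[2:].zfill(8) for byte in data)
    pad = b"=" * (-len(bits) % 6 // 2)  # 0, 1 or 2 padding characters
    bits += "0" * (-len(bits) % 6)      # pad bits to a multiple of 6
    chars = "".join(B64[int(bits[i:i + 6], 2)] for i in range(0, len(bits), 6))
    return chars.encode() + pad

def b64decode_sketch(encoded: bytes) -> bytes:
    text = encoded.decode()
    pad = text.count("=")
    bits = "".join(bin(B64.index(c))[2:].zfill(6) for c in text.rstrip("="))
    if pad:
        bits = bits[: -pad * 2]         # drop the filler bits added at encode time
    return bytes(int(bits[i:i + 8], 2) for i in range(0, len(bits), 8))

for sample in (b"", b"f", b"fo", b"foo", b"Hello, base64!"):
    assert b64encode_sketch(sample) == base64.b64encode(sample)
    assert b64decode_sketch(base64.b64encode(sample)) == sample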
| 668 | 0 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __a ( ):
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
lowercase__ = "__test_patch_submodule_mock__"
with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __a ( ):
'''simple docstring'''
assert _test_patching.open is open
lowercase__ = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , "open" , UpperCAmelCase__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def __a ( ):
'''simple docstring'''
lowercase__ = "__test_patch_submodule_missing_mock__"
with patch_submodule(_test_patching , "pandas.read_csv" , UpperCAmelCase__ ):
pass
def __a ( ):
'''simple docstring'''
lowercase__ = "__test_patch_submodule_missing_builtin_mock__"
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , "len" , UpperCAmelCase__ ) is None
with patch_submodule(_test_patching , "len" , UpperCAmelCase__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __a ( ):
'''simple docstring'''
lowercase__ = "__test_patch_submodule_start_and_stop_mock__"
lowercase__ = patch_submodule(_test_patching , "open" , UpperCAmelCase__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __a ( ):
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
lowercase__ = "__test_patch_submodule_successive_join__"
lowercase__ = "__test_patch_submodule_successive_dirname__"
lowercase__ = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ):
with patch_submodule(_test_patching , "os.rename" , UpperCAmelCase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , UpperCAmelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , "os.rename" , UpperCAmelCase__ ):
with patch_submodule(_test_patching , "os.path.join" , UpperCAmelCase__ ):
with patch_submodule(_test_patching , "os.path.dirname" , UpperCAmelCase__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __a ( ):
'''simple docstring'''
lowercase__ = "__test_patch_submodule_doesnt_exist_mock__"
with patch_submodule(_test_patching , "__module_that_doesn_exist__.__attribute_that_doesn_exist__" , UpperCAmelCase__ ):
pass
with patch_submodule(_test_patching , "os.__attribute_that_doesn_exist__" , UpperCAmelCase__ ):
pass
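# --- Illustrative sketch (not part of the dataset row above) ---
# `patch_submodule` temporarily replaces an attribute reachable through a
# module path and restores it on exit. This is NOT the datasets library's
# implementation, just a minimal context manager capturing the core idea for
# a flat (non-dotted) attribute:
import contextlib
import os

@contextlib.contextmanager
def patch_attribute(module, name, mock):
    original = getattr(module, name)
    setattr(module, name, mock)
    try:
        yield
    finally:
        setattr(module, name, original)  # always restore, even on error

sentinel = "__mock_getcwd__"
with patch_attribute(os, "getcwd", lambda: sentinel):
    assert os.getcwd() == sentinel
assert os.getcwd() != sentinel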
| 710 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
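# --- Illustrative sketch (not part of the dataset row above) ---
# The same Newton-Raphson iteration without SymPy: supply f and f' directly.
# Sketch finding sqrt(2) as the root of x**2 - 2:
def newton_raphson_sketch(func, deriv, x0, precision=1e-10, max_iter=100):
    x = x0
    for _ in range(max_iter):
        d = deriv(x)
        if d == 0:
            raise ZeroDivisionError("Could not find root")
        x_next = x - func(x) / d  # standard Newton step
        if abs(x_next - x) < precision:
            return x_next
        x = x_next
    raise RuntimeError("Did not converge")

root = newton_raphson_sketch(lambda x: x * x - 2, lambda x: 2 * x, 1.0)
assert abs(root - 2 ** 0.5) < 1e-9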
| 668 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __a ( A ):
'''simple docstring'''
lowercase__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_snake_case , _snake_case )
def __a ( A ):
'''simple docstring'''
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(_snake_case , _snake_case , bias=_snake_case )
lowercase__ = emb.weight.data
return lin_layer
def __a ( A ):
'''simple docstring'''
lowercase__ = torch.load(_snake_case , map_location="cpu" )
lowercase__ = mam_aaa["args"] or mam_aaa["cfg"]["model"]
lowercase__ = mam_aaa["model"]
remove_ignore_keys_(_snake_case )
lowercase__ = state_dict["encoder.embed_tokens.weight"].shape[0]
lowercase__ = MaMaaaConfig(
vocab_size=_snake_case , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
lowercase__ = state_dict["decoder.embed_tokens.weight"]
lowercase__ = MaMaaaForConditionalGeneration(_snake_case )
model.model.load_state_dict(_snake_case , strict=_snake_case )
lowercase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase_: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowerCAmelCase_: str = parser.parse_args()
    lowerCAmelCase_: Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
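# --- Illustrative sketch (not part of the dataset row above) ---
# `make_linear_from_emb` above ties the LM head to the embedding matrix: a
# bias-free Linear that shares the embedding weights. Standalone version in
# plain torch with toy sizes:
import torch
from torch import nn

emb = nn.Embedding(100, 16)             # toy vocab of 100, hidden size 16
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data   # weight tying
logits = lm_head(torch.randn(2, 16))
assert logits.shape == (2, 100)         # one logit per vocabulary entry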
| 711 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
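# --- Illustrative sketch (not part of the dataset row above) ---
# `_LazyModule` defers the heavy framework imports above until an attribute is
# actually touched. A bare-bones version of the idea via PEP 562 module-level
# __getattr__ (a sketch, not transformers' actual implementation); placed in a
# package's __init__.py, `from pkg import sqrt` would import math only at that
# moment.
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):  # invoked only when normal attribute lookup fails
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")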
| 668 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase_: Any = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def __a ( A , A , A ):
'''simple docstring'''
lowercase__ = state_dict.pop(_lowercase )
lowercase__ = val
def __a ( A ):
'''simple docstring'''
lowercase__ = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowercase__ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
lowercase__ = value
else:
lowercase__ = value
return new_state_dict
def __a ( A ):
'''simple docstring'''
lowercase__ = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:2_56, :]
lowercase__ = in_proj_bias[:2_56]
lowercase__ = in_proj_weight[2_56:5_12, :]
lowercase__ = in_proj_bias[2_56:5_12]
lowercase__ = in_proj_weight[-2_56:, :]
lowercase__ = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase__ = in_proj_weight[:2_56, :]
lowercase__ = in_proj_bias[:2_56]
lowercase__ = in_proj_weight[2_56:5_12, :]
lowercase__ = in_proj_bias[2_56:5_12]
lowercase__ = in_proj_weight[-2_56:, :]
lowercase__ = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
lowercase__ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
lowercase__ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowercase__ = in_proj_weight_cross_attn[:2_56, :]
lowercase__ = in_proj_bias_cross_attn[:2_56]
lowercase__ = in_proj_weight_cross_attn[2_56:5_12, :]
lowercase__ = in_proj_bias_cross_attn[2_56:5_12]
lowercase__ = in_proj_weight_cross_attn[-2_56:, :]
lowercase__ = in_proj_bias_cross_attn[-2_56:]
def __a ( A , A ):
'''simple docstring'''
lowercase__ = image.size
lowercase__ = max(_lowercase , _lowercase )
lowercase__ = 8_00 if '''detection''' in checkpoint_url else 10_00
lowercase__ = target_max_size / current_max_size
lowercase__ = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __a ( A ):
'''simple docstring'''
lowercase__ = F.to_tensor(_lowercase )
lowercase__ = F.normalize(_lowercase , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __a ( A , A , A ):
'''simple docstring'''
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
lowercase__ = rename_backbone_keys(_lowercase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowercase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = '''model.'''
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(_lowercase )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: '''table''', 1: '''table rotated'''}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
else:
lowercase__ = 1_25
lowercase__ = 6
lowercase__ = {
0: '''table''',
1: '''table column''',
2: '''table row''',
3: '''table column header''',
4: '''table projected row header''',
5: '''table spanning cell''',
}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=8_00 if "detection" in checkpoint_url else 10_00 )
lowercase__ = TableTransformerForObjectDetection(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# verify our conversion
lowercase__ = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=_lowercase )
lowercase__ = Image.open(_lowercase ).convert("RGB" )
lowercase__ = normalize(resize(_lowercase , _lowercase ) ).unsqueeze(0 )
lowercase__ = model(_lowercase )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
lowercase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
lowercase__ = (1, 1_25, 7)
lowercase__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
lowercase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , _lowercase , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , _lowercase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
'''microsoft/table-transformer-detection'''
if '''detection''' in checkpoint_url
else '''microsoft/table-transformer-structure-recognition'''
)
model.push_to_hub(_lowercase )
image_processor.push_to_hub(_lowercase )
if __name__ == "__main__":
lowerCAmelCase_: str = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCAmelCase_: Optional[int] = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
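# --- Illustrative sketch (not part of the dataset row above) ---
# The conversion above is mostly state-dict surgery: pop a tensor under the
# old key and store it under the new one. Minimal standalone version of the
# `rename_key` pattern:
import torch

def rename_key_sketch(sd, old, new):
    sd[new] = sd.pop(old)  # move the tensor, dropping the old entry

state_dict = {"query_embed.weight": torch.zeros(4, 8)}
rename_key_sketch(state_dict, "query_embed.weight", "query_position_embeddings.weight")
assert "query_position_embeddings.weight" in state_dict
assert "query_embed.weight" not in state_dict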
| 712 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase_: Union[str, Any] = logging.get_logger(__name__)
class a__ ( _a ):
snake_case_ = ["audio_values", "audio_mask"]
def __init__( self, _UpperCAmelCase=2048, _UpperCAmelCase=1, _UpperCAmelCase=[16, 16], _UpperCAmelCase=128, _UpperCAmelCase=4_4100, _UpperCAmelCase=86, _UpperCAmelCase=2048, _UpperCAmelCase=0.0, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
feature_size=_UpperCAmelCase, sampling_rate=_UpperCAmelCase, padding_value=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = spectrogram_length
lowercase__ = num_channels
lowercase__ = patch_size
lowercase__ = feature_size // self.patch_size[1]
lowercase__ = n_fft
lowercase__ = sampling_rate // hop_length_to_sampling_rate
lowercase__ = sampling_rate
lowercase__ = padding_value
lowercase__ = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2, num_mel_filters=_UpperCAmelCase, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=_UpperCAmelCase, norm="slaney", mel_scale="slaney", ).T
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = spectrogram(
_UpperCAmelCase, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
lowercase__ = log_spec[:, :-1]
lowercase__ = log_spec - 20.0
lowercase__ = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
return log_spec
def __call__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = True, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, **_UpperCAmelCase, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
F''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowercase__ = isinstance(_UpperCAmelCase, np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowercase__ = is_batched_numpy or (
isinstance(_UpperCAmelCase, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray([speech], dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCAmelCase, np.ndarray ):
lowercase__ = np.asarray(_UpperCAmelCase, dtype=np.floataa )
elif isinstance(_UpperCAmelCase, np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__ = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0], _UpperCAmelCase ):
lowercase__ = [np.asarray(_UpperCAmelCase, dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__ = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__ = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__ = padded_audio_features * self.padding_value
for i in range(len(_UpperCAmelCase ) ):
lowercase__ = audio_features[i]
lowercase__ = feature
# return as BatchFeature
if return_attention_mask:
lowercase__ = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
lowercase__ = {"audio_values": padded_audio_features}
lowercase__ = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
return encoded_inputs
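# --- Illustrative sketch (not part of the dataset row above) ---
# `_np_extract_fbank_features` above rescales a dB-scaled log-mel spectrogram
# into [-1, 1]: drop the last frame, subtract 20 dB, divide by 40, clip, and
# shift. NumPy sketch of just that normalization on a fake spectrogram:
import numpy as np

log_spec = np.random.uniform(-80.0, 0.0, size=(128, 100))  # (mel bins, frames)
log_spec = log_spec[:, :-1]                                # drop trailing frame
log_spec = log_spec - 20.0
log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
assert log_spec.min() >= -1.0 and log_spec.max() <= 1.0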
| 668 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
lowerCAmelCase_: Dict = "examples/"
lowerCAmelCase_: Dict = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
lowerCAmelCase_: str = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
lowerCAmelCase_: List[str] = "README.md"
def __a ( A , A , A ):
'''simple docstring'''
with open(__UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.read()
lowercase__ , lowercase__ = REPLACE_PATTERNS[pattern]
lowercase__ = replace.replace("VERSION" , __UpperCamelCase )
lowercase__ = re_pattern.sub(__UpperCamelCase , __UpperCamelCase )
with open(__UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(__UpperCamelCase )
def __a ( A ):
'''simple docstring'''
for folder, directories, fnames in os.walk(__UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern="examples" )
def __a ( A , A=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not patch:
update_version_in_examples(__UpperCamelCase )
def __a ( ):
'''simple docstring'''
lowercase__ = "🤗 Transformers currently provides the following architectures"
lowercase__ = "1. Want to contribute a new model?"
with open(__UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
lowercase__ = f.readlines()
# Find the start of the list.
lowercase__ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase__ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
lowercase__ = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(__UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(__UpperCamelCase )
def __a ( ):
'''simple docstring'''
with open(REPLACE_FILES["init"] , "r" ) as f:
lowercase__ = f.read()
lowercase__ = REPLACE_PATTERNS["init"][0].search(__UpperCamelCase ).groups()[0]
return packaging.version.parse(__UpperCamelCase )
def __a ( A=False ):
'''simple docstring'''
lowercase__ = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
lowercase__ = default_version.base_version
elif patch:
lowercase__ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase__ = f'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase__ = input(f'''Which version are you releasing? [{default_version}]''' )
if len(__UpperCamelCase ) == 0:
lowercase__ = default_version
print(f'''Updating version to {version}.''' )
global_version_update(__UpperCamelCase , patch=__UpperCamelCase )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
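    # Typical invocations, for reference (illustrative; run from the repo root):
    #   python utils/release.py                  -> prepare a minor release
    #   python utils/release.py --patch          -> prepare a patch release
    #   python utils/release.py --post_release   -> move back to the next dev version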
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []
def solution() -> int:
    """Return the smallest odd composite that cannot be written as a prime plus twice a square."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(F'{solution() = }')
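    # Hedged sanity check: the published answer to Project Euler problem 46 is 5777.
    assert solution() == 5777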
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler with predictor-corrector sampling."""

    order = 1
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # No scaling is needed for this scheduler; kept for API compatibility.
        return sample
    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)
    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of the SDE
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas have the same device as the original samples before indexing.
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
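# Illustrative predictor-corrector loop (hedged sketch; a random tensor stands
# in for a real score network's output, and the sample shape is arbitrary):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=5)
#   scheduler.set_sigmas(num_inference_steps=5)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_output = torch.randn_like(sample)  # stand-in for the score model
#       sample = scheduler.step_correct(model_output, sample).prev_sample
#       sample = scheduler.step_pred(model_output, t, sample).prev_sample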
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
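# Illustrative torch.hub usage (assuming this hubconf.py sits at the root of
# the huggingface/transformers repository):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")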
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wraps tokenized sequences and prepares padded batches for distillation training."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process does this."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
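# Illustrative usage sketch (the `params` object here is hypothetical; the real
# one comes from the distillation training script's argument parser):
#
#   dataset = LmSeqsDataset(params=params, data=list_of_token_id_arrays)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, collate_fn=dataset.batch_sequences
#   )
#   for token_ids, lengths in loader:
#       ...  # feed the padded batch to the student/teacher models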
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    """Draw the Sierpinski triangle by recursing on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
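    # Note: each recursion level triples the number of triangles (3**depth at
    # the deepest level), so turtle rendering gets slow for depths much above 7.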
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
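# Quick illustration (hedged sketch; run where transformers is installed):
#
#   from transformers import Data2VecVisionConfig
#   config = Data2VecVisionConfig()                      # base-ft style defaults
#   small = Data2VecVisionConfig(hidden_size=384, num_hidden_layers=6)
#   print(config.image_size, small.hidden_size)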
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase_: List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
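# Illustrative use through the public API (the file path is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})
#   print(ds["train"].features)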
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
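# Quick illustration (hedged sketch):
#
#   from transformers import MarkupLMConfig
#   config = MarkupLMConfig()  # microsoft/markuplm-base style defaults
#   print(config.max_depth, config.xpath_unit_hidden_size)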
"""simple docstring"""
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
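# Illustrative use of these feature types (hedged sketch):
#
#   from datasets import ClassLabel, Features, Value
#   features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})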
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase_: Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_: str = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing from failing when exporting to JSON
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
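# Quick illustration (hedged sketch): a smaller configuration for experiments.
#
#   from transformers import MCTCTConfig
#   config = MCTCTConfig(num_hidden_layers=4, hidden_size=256)
#   print(config.conv_kernel, config.conv_stride)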
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """
    Divide ``number_of_bytes`` into ``partitions`` contiguous byte ranges,
    e.g. to hand one range to each download thread.

    >>> allocation_num(16647, 4)
    ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    >>> allocation_num(888, 999)
    Traceback (most recent call last):
        ...
    ValueError: partitions can not > number_of_bytes!
    """
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
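    # A concrete call beyond the doctests (hedged example):
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']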
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return model
@property
    def dummy_unet_condition(self):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
return model
@property
    def dummy_vqvae_and_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
lowercase__ = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return vqvae, unet
@slow
    def test_audio_diffusion(self):
'''simple docstring'''
lowercase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
lowercase__ = DDPMScheduler()
lowercase__ = AudioDiffusionPipeline(vqvae=__a, unet=self.dummy_unet, mel=__a, scheduler=__a )
lowercase__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowercase__ = torch.Generator(device=__a ).manual_seed(42 )
lowercase__ = pipe(generator=__a, steps=4 )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
lowercase__ = torch.Generator(device=__a ).manual_seed(42 )
lowercase__ = pipe(generator=__a, steps=4, return_dict=__a )
lowercase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
lowercase__ = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8" )[:10]
lowercase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowercase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
lowercase__ = DDIMScheduler()
lowercase__ = self.dummy_vqvae_and_unet
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=__a, scheduler=__a )
lowercase__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
lowercase__ = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowercase__ = torch.Generator(device=__a ).manual_seed(42 )
lowercase__ = pipe(raw_audio=__a, generator=__a, start_step=5, steps=10 )
lowercase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowercase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
lowercase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowercase__ = self.dummy_unet_condition
lowercase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=__a, mel=__a, scheduler=__a )
lowercase__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
np.random.seed(0 )
lowercase__ = torch.rand((1, 1, 10) )
lowercase__ = pipe(generator=__a, encoding=__a )
lowercase__ = output.images[0]
lowercase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
lowercase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_audio_diffusion(self):
'''simple docstring'''
lowercase__ = torch_device
lowercase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
lowercase__ = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
lowercase__ = torch.Generator(device=__a ).manual_seed(42 )
lowercase__ = pipe(generator=__a )
lowercase__ = output.audios[0]
lowercase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowercase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
lowercase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """
    Multi-Level Feedback Queue: every queue except the last one applies round
    robin with its own time slice; the last queue runs first come, first served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque,
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slices of the queues on which the round robin algorithm is applied
        self.time_slices = time_slices
        # unfinished processes are in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of the finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the waiting time of each process in the given queue."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround time of each process in the given queue."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion time of each process in the given queue."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list[int]:
        """Return the remaining burst time of each process in the given queue."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Add the time the process has waited since it was last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque) -> deque:
        finished = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(self, ready_queue: deque, time_slice: int):
        finished = deque()  # sequence deque of terminated processes
        # just for one cycle; unfinished processes go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use CPU for only the time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque:
        # all queues except the last one apply the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue uses the first come, first served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
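    # Expected results for this example (computed by walking the schedule:
    # RR(17), then RR(25), then FCFS; they match the upstream reference's doctests):
    #   waiting time:    [83, 17, 94, 101]
    #   completion time: [136, 34, 162, 125]
    #   turnaround time: [136, 34, 162, 125]
    #   finished order:  ['P2', 'P4', 'P1', 'P3']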
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
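    # Hedged note on the lazy pattern: the assignment above routes attribute
    # access through _LazyModule, so e.g.
    #   from transformers.models.convbert import ConvBertModel
    # only imports the heavy torch/TF modeling code on first use.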
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Collect single-byte tokens that decode cleanly and round-trip through encode.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
lowercase__ = tokenizer(["hi", "I went to the gym", ""] )
self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
    def test_multibytes_char(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = "Unicode €."
lowercase__ = tokenizer(_UpperCAmelCase )
lowercase__ = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "Unicode €.</s>" )
lowercase__ = tokenizer("e è é ê ë" )
lowercase__ = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded["input_ids"], _UpperCAmelCase )
# decoding
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, "e è é ê ë</s>" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
    def test_prepare_batch_integration(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
if FRAMEWORK != "jax":
lowercase__ = list(batch.input_ids.numpy()[0] )
else:
lowercase__ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual((2, 37), batch.input_ids.shape )
self.assertEqual((2, 37), batch.attention_mask.shape )
    def test_empty_target_text(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowercase__ = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids", _UpperCAmelCase )
self.assertIn("attention_mask", _UpperCAmelCase )
self.assertNotIn("decoder_input_ids", _UpperCAmelCase )
self.assertNotIn("decoder_attention_mask", _UpperCAmelCase )
    def test_max_length_integration(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = [
"Summary of the text.",
"Another summary.",
]
lowercase__ = tokenizer(
text_target=_UpperCAmelCase, max_length=32, padding="max_length", truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase )
self.assertEqual(32, targets["input_ids"].shape[1] )
    def test_eos_in_input(self):
'''simple docstring'''
lowercase__ = self.ta_base_tokenizer
lowercase__ = ["A long paragraph for summarization. </s>"]
lowercase__ = ["Summary of the text. </s>"]
# fmt: off
lowercase__ = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase__ = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase__ = tokenizer(_UpperCAmelCase, text_target=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase, batch["input_ids"][0] )
self.assertEqual(_UpperCAmelCase, batch["labels"][0] )
    def test_save_and_load_tokenizer(self):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
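# A minimal usage sketch of the override behavior exercised above (the local
# path is hypothetical; the keyword argument mirrors `from_pretrained`):
#
# tokenizer = tokenizer_class.from_pretrained(
#     "path/to/saved_tokenizer", additional_special_tokens=["<my_token>"] )
# assert "<my_token>" in tokenizer.additional_special_tokens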
| 668 | 0 |
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
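# Following the same probe pattern, further optional libraries can be checked;
# accelerate is shown here as one assumed example.
try:
    import accelerate

    print("Accelerate version:", accelerate.__version__)
except ImportError:
    print("Accelerate version:", None)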
| 700 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
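# A minimal standalone usage sketch of the pipeline under test (the checkpoint
# id is the tiny test model used above; the local video path is hypothetical):
#
# classifier = pipeline(
#     "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification" )
# print(classifier("archery.mp4", top_k=2))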
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021E-19  # units = C
def carrier_concentration( conductivity , electron_conc , mobility , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
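    # A quick worked example (assumed illustrative SI values): with exactly one
    # unknown passed as 0, the function returns its name and value via the
    # relation sigma = q * n * mu.
    print(carrier_concentration(conductivity=0, electron_conc=2.5e19, mobility=0.135 ) )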
| 701 |
"""simple docstring"""
import itertools
import math
def is_prime( number ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1, since every other
    # residue class mod 6 is divisible by 2 or by 3.
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
'''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
yield num
num += 1
def solution( nth = 1_00_01 ):
'''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase_: Optional[int] = logging.getLogger()
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser()
parser.add_argument("-f" )
lowercase__ = parser.parse_args()
return args.f
class a__ ( TestCasePlus ):
    def setUp( self ):
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self, args ):
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0, "run_glue_deebert.py" )
            with patch.object(sys, "argv", args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ):
'''simple docstring'''
lowercase__ = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(lowercase__ )
lowercase__ = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(lowercase__ )
lowercase__ = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(lowercase__ )
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader( AbstractDatasetReader ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
        lowercase__ = path_or_paths if isinstance(path_or_paths, dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=_UpperCAmelCase, data_files=lowercase__, features=_UpperCAmelCase, **_UpperCAmelCase, )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
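# A minimal usage sketch of the reader above (the file path is hypothetical):
#
# ds = TextDatasetReader("my_corpus.txt", split="train").read()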
| 668 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader( ABC ):
def __init__( self, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict ) else '''train'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
    def read( self ):
'''simple docstring'''
pass
class AbstractDatasetInputStream( ABC ):
def __init__( self, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs
@abstractmethod
    def read( self ):
'''simple docstring'''
pass
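# Concrete readers -- such as the Text reader above -- subclass these ABCs and
# implement `read()` to return either a map-style Dataset or, when
# streaming=True, an IterableDataset.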
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def get_dataloaders( accelerator , batch_size = 16 , model_name_or_path = "bert-base-cased" ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=1_28 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
    # We also need to keep track of the starting epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
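# Example invocation (a sketch; the flags match the parser above, while the
# script filename and DeepSpeed config path are assumed):
#
# accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#     --model_name_or_path bert-base-cased --num_epochs 3 --output_dir .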
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted :
    def __init__( self ):
        '''simple docstring'''
        self.connections = {}
    def add_node( self, node ):
        '''simple docstring'''
        self.connections[node] = {}
    def add_transition_probability( self, node1, node2, probability ):
        '''simple docstring'''
        if node1 not in self.connections:
            self.add_node(node1 )
        if node2 not in self.connections:
            self.add_node(node2 )
        self.connections[node1][node2] = probability
    def get_nodes( self ):
        '''simple docstring'''
        return list(self.connections )
    def transition( self, node ):
        '''simple docstring'''
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions( start , transitions , steps ):
    '''simple docstring'''
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1 , node2 , probability )
    visited = Counter(graph.get_nodes() )
    node = start
    for _ in range(steps ):
        node = graph.transition(node )
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
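    # A quick sanity check of the random walk above (an assumed two-state chain
    # whose outgoing probabilities sum to 1); visit counts should be roughly
    # proportional to the chain's stationary distribution.
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", transitions, 1000 ) )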
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
        config = {"num_train_timesteps": 1000}
        config.update(**_UpperCAmelCase )
        return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 668 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( __SCREAMING_SNAKE_CASE ):
snake_case_ = 42
class a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
@register_to_config
def __init__( self, _UpperCAmelCase = 3, _UpperCAmelCase = 3, _UpperCAmelCase = ("DownEncoderBlock2D",), _UpperCAmelCase = ("UpDecoderBlock2D",), _UpperCAmelCase = (64,), _UpperCAmelCase = 1, _UpperCAmelCase = "silu", _UpperCAmelCase = 3, _UpperCAmelCase = 32, _UpperCAmelCase = 256, _UpperCAmelCase = 32, _UpperCAmelCase = None, _UpperCAmelCase = 0.18_215, _UpperCAmelCase = "group", ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
lowercase__ = Encoder(
in_channels=__snake_case, out_channels=__snake_case, down_block_types=__snake_case, block_out_channels=__snake_case, layers_per_block=__snake_case, act_fn=__snake_case, norm_num_groups=__snake_case, double_z=__snake_case, )
lowercase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowercase__ = nn.Conv2d(__snake_case, __snake_case, 1 )
lowercase__ = VectorQuantizer(__snake_case, __snake_case, beta=0.25, remap=__snake_case, sane_index_shape=__snake_case )
        lowercase__ = nn.Conv2d(__snake_case, __snake_case, 1 )
# pass init params to Decoder
lowercase__ = Decoder(
in_channels=__snake_case, out_channels=__snake_case, up_block_types=__snake_case, block_out_channels=__snake_case, layers_per_block=__snake_case, act_fn=__snake_case, norm_num_groups=__snake_case, norm_type=__snake_case, )
@apply_forward_hook
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = True ):
'''simple docstring'''
lowercase__ = self.encoder(__snake_case )
lowercase__ = self.quant_conv(__snake_case )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__snake_case )
@apply_forward_hook
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = False, _UpperCAmelCase = True ):
'''simple docstring'''
if not force_not_quantize:
lowercase__ = self.quantize(__snake_case )
else:
lowercase__ = h
lowercase__ = self.post_quant_conv(__snake_case )
lowercase__ = self.decoder(__snake_case, quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase = True ):
'''simple docstring'''
lowercase__ = sample
lowercase__ = self.encode(__snake_case ).latents
lowercase__ = self.decode(__snake_case ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__snake_case )
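# A minimal forward-pass sketch of the model above (shapes assumed; the class
# mirrors diffusers' `VQModel` encode -> quantize -> decode wiring):
#
# model = VQModel()
# x = torch.randn(1, 3, 32, 32)
# reconstruction = model(x).sample  # same shape as x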
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowerCAmelCase_ = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key( state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" , "backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ):
    '''simple docstring'''
    prefix = ""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[f'''decoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[f'''decoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[f'''decoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
        in_proj_bias_cross_attn = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.weight'''] = in_proj_weight_cross_attn[:2_56, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.q_proj.bias'''] = in_proj_bias_cross_attn[:2_56]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.weight'''] = in_proj_weight_cross_attn[2_56:5_12, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.k_proj.bias'''] = in_proj_bias_cross_attn[2_56:5_12]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.weight'''] = in_proj_weight_cross_attn[-2_56:, :]
        state_dict[f'''decoder.layers.{i}.encoder_attn.v_proj.bias'''] = in_proj_bias_cross_attn[-2_56:]
def resize( image , checkpoint_url ):
    '''simple docstring'''
    width , height = image.size
    current_max_size = max(width , height )
    target_max_size = 8_00 if "detection" in checkpoint_url else 10_00
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    '''simple docstring'''
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def __a ( A , A , A ):
'''simple docstring'''
logger.info("Converting model..." )
# load original state dict
lowercase__ = torch.hub.load_state_dict_from_url(A , map_location="cpu" )
# rename keys
for src, dest in rename_keys:
rename_key(A , A , A )
lowercase__ = rename_backbone_keys(A )
# query, key and value matrices need special treatment
read_in_q_k_v(A )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase__ = "model."
for key in state_dict.copy().keys():
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowercase__ = state_dict.pop(A )
lowercase__ = val
# create HuggingFace model and load state dict
lowercase__ = TableTransformerConfig(
backbone="resnet18" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowercase__ = 15
lowercase__ = 2
lowercase__ = {0: "table", 1: "table rotated"}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
else:
lowercase__ = 1_25
lowercase__ = 6
lowercase__ = {
0: "table",
1: "table column",
2: "table row",
3: "table column header",
4: "table projected row header",
5: "table spanning cell",
}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = DetrImageProcessor(
format="coco_detection" , max_size=8_00 if "detection" in checkpoint_url else 10_00 )
lowercase__ = TableTransformerForObjectDetection(A )
model.load_state_dict(A )
model.eval()
# verify our conversion
lowercase__ = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
lowercase__ = hf_hub_download(repo_id="nielsr/example-pdf" , repo_type="dataset" , filename=A )
lowercase__ = Image.open(A ).convert("RGB" )
lowercase__ = normalize(resize(A , A ) ).unsqueeze(0 )
lowercase__ = model(A )
if "detection" in checkpoint_url:
lowercase__ = (1, 15, 3)
lowercase__ = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
lowercase__ = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
lowercase__ = (1, 1_25, 7)
lowercase__ = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
lowercase__ = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , A , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A ).mkdir(exist_ok=A )
model.save_pretrained(A )
image_processor.save_pretrained(A )
if push_to_hub:
# Push model to HF hub
logger.info("Pushing model to the hub..." )
lowercase__ = (
"microsoft/table-transformer-detection"
if "detection" in checkpoint_url
else "microsoft/table-transformer-structure-recognition"
)
model.push_to_hub(A )
image_processor.push_to_hub(A )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowerCAmelCase_ = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
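# Example invocation (a sketch; the checkpoint URL comes from the `choices`
# above, and the script filename is assumed):
#
# python convert_table_transformer_checkpoint.py \
#     --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#     --pytorch_dump_folder_path ./table-transformer-detection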
| 706 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
def make_matrix( row_size = 4 ):
    '''simple docstring'''
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90( matrix ):
    '''simple docstring'''
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180( matrix ):
    '''simple docstring'''
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270( matrix ):
    '''simple docstring'''
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose( matrix ):
    '''simple docstring'''
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row( matrix ):
    '''simple docstring'''
    matrix = matrix[::-1]
    return matrix
def reverse_column( matrix ):
    '''simple docstring'''
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix( matrix ):
    '''simple docstring'''
    for i in matrix:
        print(*i )
if __name__ == "__main__":
lowerCAmelCase_: Dict = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
lowerCAmelCase_: Optional[Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
lowerCAmelCase_: Optional[int] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 707 |
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian( matrix ):
    '''simple docstring'''
    return np.array_equal(matrix , matrix.conjugate().T )
def rayleigh_quotient( a , v ):
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a )
    assert isinstance(v_star_dot , np.ndarray )
    return (v_star_dot.dot(v )) / (v_star.dot(v ))
def tests( ):
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] )
    v = np.array([[1], [2], [3]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a , v ) )
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
    assert is_hermitian(a ), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a , v ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
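    # One more hedged check (an assumed 2x2 Hermitian example): for an
    # eigenvector, the Rayleigh quotient recovers the eigenvalue, here 3.
    b = np.array([[2, 1j], [-1j, 2]] )
    w = np.array([[1j], [1]] )
    assert is_hermitian(b ), f'''{b} is not hermitian.'''
    print(rayleigh_quotient(b , w ) )  # expected value: 3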
| 668 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCAmelCase_: List[str] = "<<<<<<< This should probably be modified because it mentions: "
lowerCAmelCase_: List[str] = "=======\n>>>>>>>\n"
lowerCAmelCase_: Tuple = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
lowerCAmelCase_: Optional[Any] = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value(\'\1\')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value(\'string\')"),
(R"tfds\.features\.Text\(", R"datasets.Value(\'string\'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def __a ( A ):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory )
class a__ ( UpperCAmelCase_ ):
@staticmethod
def snake_case__ ( _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = parser.add_parser(
"convert", help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.", )
train_parser.add_argument(
"--tfds_path", type=_snake_case, required=_snake_case, help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.", )
train_parser.add_argument(
"--datasets_directory", type=_snake_case, required=_snake_case, help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=_snake_case )
def __init__( self, _UpperCAmelCase, _UpperCAmelCase, *_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = get_logger("datasets-cli/converting" )
lowercase__ = tfds_path
lowercase__ = datasets_directory
def snake_case__ ( self ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
lowercase__ = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__ = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
lowercase__ = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
lowercase__ = []
lowercase__ = []
lowercase__ = {}
if os.path.isdir(self._tfds_path ):
lowercase__ = os.listdir(_snake_case )
else:
lowercase__ = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
lowercase__ = os.path.join(_snake_case, _snake_case )
lowercase__ = os.path.join(_snake_case, _snake_case )
if not os.path.isfile(_snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(_snake_case, encoding="utf-8" ) as f:
lowercase__ = f.readlines()
lowercase__ = []
lowercase__ = False
lowercase__ = False
lowercase__ = []
for line in lines:
lowercase__ = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__ = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
lowercase__ = ""
continue
elif "from absl import logging" in out_line:
lowercase__ = "from datasets import logging\n"
elif "getLogger" in out_line:
lowercase__ = out_line.replace("getLogger", "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__ = True
lowercase__ = list(filter(lambda _UpperCAmelCase : e in out_line, _snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_snake_case ) + "\n" )
out_lines.append(_snake_case )
out_lines.append(_snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__ = re.sub(_snake_case, _snake_case, _snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__ = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)", _snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
lowercase__ = "from . import " + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__ = True
out_lines.append(_snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__ = f_name.replace(".py", "" )
lowercase__ = os.path.join(_snake_case, _snake_case )
lowercase__ = os.path.join(_snake_case, _snake_case )
os.makedirs(_snake_case, exist_ok=_snake_case )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_snake_case )
if needs_manual_update:
with_manual_update.append(_snake_case )
with open(_snake_case, "w", encoding="utf-8" ) as f:
f.writelines(_snake_case )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
lowercase__ = os.path.basename(_snake_case )
lowercase__ = imports_to_builder_map[f_name.replace(".py", "" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_snake_case, _snake_case )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
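# Example invocation of this command (a sketch; the paths are hypothetical):
#
# datasets-cli convert --tfds_path ./tfds/my_dataset.py --datasets_directory ./hf_datasets/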
| 708 |
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
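# enable_full_determinism seeds the Python/NumPy/torch RNGs and forces deterministic
# CUDA kernels, so the hard-coded output slices asserted below stay stable across runs.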
class a__ ( ModelTesterMixin, unittest.TestCase ):
    model_class = PriorTransformer
    main_input_name = "hidden_states"
@property
    def dummy_input( self ):
'''simple docstring'''
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = floats_tensor((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
    def get_dummy_seed_input( self, seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
    def input_shape( self ):
'''simple docstring'''
return (4, 8)
@property
    def output_shape( self ):
'''simple docstring'''
return (4, 8)
    def prepare_init_args_and_inputs_for_common( self ):
'''simple docstring'''
        init_dict = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ), 0 )
        model.to(torch_device )
        hidden_states = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def snake_case__ ( self ):
'''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        signature = inspect.signature(model.forward )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
lowercase__ = model.to(_UpperCAmelCase )
if hasattr(_UpperCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
lowercase__ = self.get_dummy_seed_input()
with torch.no_grad():
lowercase__ = model(**_UpperCAmelCase )[0]
lowercase__ = output[0, :5].flatten().cpu()
print(_UpperCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
lowercase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(_UpperCAmelCase, _UpperCAmelCase, rtol=1E-2 ) )
@slow
class a__ ( unittest.TestCase ):
    def get_dummy_seed_input( self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0 ):
        '''simple docstring'''
        torch.manual_seed(seed )
        lowercase__ = batch_size
        lowercase__ = embedding_dim
        lowercase__ = num_embeddings
        hidden_states = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        proj_embedding = torch.randn((batch_size, embedding_dim) ).to(torch_device )
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(torch_device )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def snake_case__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
    def snake_case__ ( self, seed, expected_slice ):
        '''simple docstring'''
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
        model.to(torch_device )
        input = self.get_dummy_seed_input(seed=seed )
        with torch.no_grad():
            sample = model(**input )[0]
        assert list(sample.shape ) == [1, 768]
        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice )
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3 )
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: Dict = logging.get_logger(__name__)
class a__ ( PretrainedConfig ):
    snake_case_ = "timm_backbone"
    def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
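# Usage sketch (illustrative values only): constructing the config above with
#     a__(backbone="resnet50", out_indices=(1, 2, 3, 4))
# selects which timm feature maps the backbone exposes; any timm model name can be
# passed as `backbone`.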
| 709 |
"""simple docstring"""
lowerCAmelCase_: Any = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data ):
    '''simple docstring'''
    if not isinstance(data, bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = "".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
            for index in range(0, len(binary_stream ), 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data ):
    '''simple docstring'''
    if not isinstance(encoded_data, bytes ) and not isinstance(encoded_data, str ):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes ):
        try:
            encoded_data = encoded_data.decode("utf-8" )
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters" )
    padding = encoded_data.count("=" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8], 2 )
        for index in range(0, len(binary_stream ), 8 )
    ]
    return bytes(decoded_data )
if __name__ == "__main__":
import doctest
doctest.testmod()
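# Example round trip (standard Base64 alphabet, RFC 4648):
#     base64_encode(b"Hello")     -> b"SGVsbG8="
#     base64_decode(b"SGVsbG8=")  -> b"Hello"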
| 668 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
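# Logits *processors* enforce hard constraints (minimum length, forced BOS/EOS
# tokens) while *warpers* reshape the next-token distribution (temperature, top-k,
# top-p); both share the call signature (input_ids, scores, cur_len) -> scores.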
@require_flax
class a__ ( unittest.TestCase ):
    def _get_uniform_logits( self, batch_size, length ):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length) ) / length
return scores
def snake_case__ ( self ):
'''simple docstring'''
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length )
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1 )  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4 )  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1 )
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5 )
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3 )
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None ), axis=-1 )
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None ), axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min() )
def snake_case__ ( self ):
'''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :], (batch_size, vocab_size) ).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3 )
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None )
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0] ).tolist(), 7 * [True] + 3 * [False] )
        self.assertListEqual(jnp.isinf(scores[1] ).tolist(), 2 * [True] + 3 * [False] + 5 * [True] )
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3 )
        ramp_logits = np.broadcast_to(np.arange(length )[None, :], (batch_size, length) ).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist(), [2, 2] )
def snake_case__ ( self ):
'''simple docstring'''
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None ) )
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1E-3 ) )
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size )[None, :], (batch_size, vocab_size) ).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0 )
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist(), [3, 2] )
def snake_case__ ( self ):
'''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id )
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20 )
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len )
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf" )] )
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size )
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores_before_min_length ).any() )
def snake_case__ ( self ):
'''simple docstring'''
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20 )
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores = logits_processor(input_ids, scores, cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0] )  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores = logits_processor(input_ids, scores, cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
def snake_case__ ( self ):
'''simple docstring'''
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id )
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20 )
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores = logits_processor(input_ids, scores, cur_len=cur_len )
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0] )  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores = logits_processor(input_ids, scores, cur_len=cur_len )
        self.assertFalse(jnp.isinf(scores ).any() )
def snake_case__ ( self ):
'''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len )
        scores = top_k_warp(input_ids, scores, cur_len=cur_len )
        scores = top_p_warp(input_ids, scores, cur_len=cur_len )
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len )
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len )
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len )
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
def snake_case__ ( self ):
'''simple docstring'''
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size )
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size )
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5 )
        top_k_warp = FlaxTopKLogitsWarper(3 )
        top_p_warp = FlaxTopPLogitsWarper(0.8 )
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id )
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id )
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id )
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len ):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len )
            scores = top_k_warp(input_ids, scores, cur_len=cur_len )
            scores = top_p_warp(input_ids, scores, cur_len=cur_len )
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len )
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len )
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len )
            return scores
        # with processor list
        def run_processor_list(input_ids, scores, cur_len ):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
            scores = processor(input_ids, scores, cur_len=cur_len )
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list )
        jitted_run_processor_list = jax.jit(run_processor_list )
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len )
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len )
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3 ) )
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist() )
| 710 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
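# Newton-Raphson update for a root of multiplicity m:
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)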
def __a ( A , A , A = "x" , A = 10**-10 , A = 1 , ):
'''simple docstring'''
lowercase__ = symbols(A )
lowercase__ = lambdify(A , A )
lowercase__ = lambdify(A , diff(A , A ) )
lowercase__ = starting_point
while True:
if diff_function(A ) != 0:
lowercase__ = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError("Could not find root" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(F'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F'{newton_raphson("exp(x) - 1", 1_0, precision=0.005)}',
)
# Find root of cos(x)
print(F'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 668 | 0 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def snake_case__ ( self, model, tokenizer, processor ):
        '''simple docstring'''
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2 )
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def snake_case__ ( self, video_classifier, examples ):
        '''simple docstring'''
        for example in examples:
            outputs = video_classifier(example )
            self.assertEqual(
                outputs, [
                    {"score": ANY(float ), "label": ANY(str )},
                    {"score": ANY(float ), "label": ANY(str )},
                ], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
        outputs = video_classifier(video_file_path, top_k=2 )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ], top_k=2, )
        self.assertEqual(
            nested_simplify(outputs, decimals=4 ), [
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
                [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
            ], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 711 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_: Union[str, Any] = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
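# Backend-specific names are appended to `_import_structure` below only when the
# corresponding dependency (tokenizers / torch / TF / flax) is importable; the
# `_LazyModule` at the bottom then resolves attributes on first access instead of
# paying the full import cost eagerly.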
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Union[str, Any] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Any = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Tuple = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: Optional[Any] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase_: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
snake_case_ = CpmAntTokenizer
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens, jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text, normalized_text )
| 712 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class a__ ( SequenceFeatureExtractor ):
snake_case_ = ["audio_values", "audio_mask"]
    def __init__( self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=4_4100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22_050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T
    def _np_extract_fbank_features( self, waveform ):
        '''simple docstring'''
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann" ), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
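        # the -20 dB shift and the clip/scale above squash the dB spectrogram into [-1.0, 1.0]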
return log_spec
    def __call__( self, raw_speech, return_tensors = None, return_attention_mask = True, sampling_rate = None, resample = False, mask_audio = False, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list ):
            audio_features = [np.asarray(feature, dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors )
return encoded_inputs
| 668 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester( unittest.TestCase ):
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        '''simple docstring'''
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case__ ( self ):
'''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def snake_case__ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing, "do_resize" ) )
        self.assertTrue(hasattr(image_processing, "size" ) )
        self.assertTrue(hasattr(image_processing, "apply_ocr" ) )
def snake_case__ ( self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        self.assertIsInstance(encoding.words, list )
        self.assertIsInstance(encoding.boxes, list )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def snake_case__ ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def snake_case__ ( self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def snake_case__ ( self ):
'''simple docstring'''
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test" )
        image = Image.open(ds[0]["file"] ).convert("RGB" )
        encoding = image_processing(image, return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
lowercase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
lowercase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, _UpperCAmelCase )
self.assertListEqual(encoding.boxes, _UpperCAmelCase )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image, return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
| 713 |
"""simple docstring"""
from __future__ import annotations
import math
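# Project Euler 46 ("Goldbach's other conjecture"): every odd composite was
# conjectured to be the sum of a prime and twice a square; this module finds the
# smallest odd composite for which that fails.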
def is_prime(number ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number ) + 1 ), 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def compute_nums(n ):
    '''simple docstring'''
    if not isinstance(n, int ):
        raise ValueError("n must be an integer" )
    if n <= 0:
        raise ValueError("n must be >= 0" )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution():
    '''simple docstring'''
    return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang ):
    '''simple docstring'''
    texts = {
        "en": "Machine learning is great, isn\'t it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'''{src_lang}-{tgt_lang}'''
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True )
    path = os.path.join(model_card_dir, "README.md" )
    print(f'''Generating {path}''' )
    with open(path, "w", encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
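# Running this script from the repository root writes one card per model, e.g.
# model_cards/facebook/wmt19-ru-en/README.md for each of the four pairs above.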
| 714 |
"""simple docstring"""
import os
import sys
lowerCAmelCase_: Any = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
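# torch.hub entry points: `torch.hub.load("huggingface/transformers", "model", ...)`
# checks the `dependencies` listed above and exposes each function below
# (config, tokenizer, model, ...) as a loadable entry point.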
@add_start_docstrings(AutoConfig.__doc__ )
def config(*args, **kwargs ):
    '''simple docstring'''
    return AutoConfig.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer(*args, **kwargs ):
    '''simple docstring'''
    return AutoTokenizer.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model(*args, **kwargs ):
    '''simple docstring'''
    return AutoModel.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM(*args, **kwargs ):
    '''simple docstring'''
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM(*args, **kwargs ):
    '''simple docstring'''
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification(*args, **kwargs ):
    '''simple docstring'''
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering(*args, **kwargs ):
    '''simple docstring'''
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs )
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance( inductance , frequency , reactance ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
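# Example: a 10 mH inductor at 1 kHz gives X_L = 2*pi*1000*0.01 ~= 62.83 ohm:
#     ind_reactance(inductance=0.01, frequency=1000, reactance=0)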
| 715 |
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_UpperCAmelCase ):
lowercase__ = AutoConfig.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = FlaxAutoModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxBertModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
lowercase__ = AutoTokenizer.from_pretrained(_UpperCAmelCase )
lowercase__ = FlaxRobertaModel.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX )
@jax.jit
def eval(**_UpperCAmelCase ):
return model(**_UpperCAmelCase )
eval(**_UpperCAmelCase ).block_until_ready()
def snake_case__ ( self ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier" ):
            model = FlaxAutoModel.from_pretrained("bert-base" )
def snake_case__ ( self ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa" )
def snake_case__ ( self ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack", ):
            model = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
def snake_case__ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(_UpperCAmelCase, "Use `from_pt=True` to load this model" ):
lowercase__ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 668 | 0 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowerCAmelCase_: List[str] = os.path.join(git_repo_path, "src", "transformers")
lowerCAmelCase_: Any = '''
{0} = None
'''
lowerCAmelCase_: Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
lowerCAmelCase_: str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class a__ ( unittest.TestCase ):
def snake_case__ ( self ):
'''simple docstring'''
        backend = find_backend(" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")" )
        self.assertIsNone(backend )
        backend = find_backend(" if not is_tokenizers_available():" )
        self.assertEqual(backend, "tokenizers" )
        backend = find_backend(" if not is_tensorflow_text_available():" )
        self.assertEqual(backend, "tensorflow_text" )
        backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):" )
        self.assertEqual(backend, "sentencepiece_and_tokenizers" )
        backend = find_backend(
            " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" )
        self.assertEqual(backend, "sentencepiece_and_tensorflow_text" )
        backend = find_backend(
            " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" )
        self.assertEqual(backend, "sentencepiece_and_tokenizers_and_vision" )
def snake_case__ ( self ):
'''simple docstring'''
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects )
        self.assertIn("tensorflow_text", objects )
        self.assertIn("sentencepiece_and_tokenizers", objects )
# Likewise, we can't assert on the exact content of a key
self.assertIn("BertModel", objects["torch"] )
self.assertIn("TFBertModel", objects["tf"] )
self.assertIn("FlaxBertModel", objects["flax"] )
self.assertIn("BertModel", objects["torch"] )
self.assertIn("TFBertTokenizer", objects["tensorflow_text"] )
self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = create_dummy_object("CONSTANT", "\'torch\'" )
self.assertEqual(__SCREAMING_SNAKE_CASE, "\nCONSTANT = None\n" )
lowercase__ = create_dummy_object("function", "\'torch\'" )
self.assertEqual(
__SCREAMING_SNAKE_CASE, "\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n" )
lowercase__ = "\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n"
lowercase__ = create_dummy_object("FakeClass", "\'torch\'" )
self.assertEqual(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"], lowercase__ )
| 716 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_: str = logging.get_logger(__name__)
lowerCAmelCase_: List[Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( PretrainedConfig ):
snake_case_ = "data2vec-vision"
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
'''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class a__ ( OnnxConfig ):
snake_case_ = version.parse("1.11" )
@property
def snake_case__ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case__ ( self ):
'''simple docstring'''
return 1E-4
| 668 | 0 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_3_7_8_1_3_7.0
AXIS_B = 6_3_5_6_7_5_2.3_1_4_2_4_5
RADIUS = 6_3_7_8_1_3_7
def __a ( lat1 , lon1 , lat2 , lon2 ):
    '''simple docstring'''
    # Great-circle distance via the haversine formula, using reduced latitudes
    # on the WGS84 ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
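    # Illustrative usage (the coordinates below are an assumption for
    # demonstration, not part of the original module): the distance from
    # San Francisco (37.774856, -122.424227) to Yosemite
    # (37.864742, -119.537521) should come out on the order of 254 km;
    # the function returns meters.
    print(__a(37.774856, -122.424227, 37.864742, -119.537521))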
| 717 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( PretrainedConfig ):
snake_case_ = "markuplm"
    def __init__( self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=0, eos_token_id=2, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, tag_pad_id=216, subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
'''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 668 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups, )
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def snake_case__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
snake_case_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37 )
def snake_case__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase__[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*lowercase__ )
@slow
def snake_case__ ( self ):
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(lowercase__ )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = AlbertModel.from_pretrained("albert-base-v2" )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(__A, attention_mask=__A )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape, __A )
lowercase__ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4], __A, atol=1E-4 ) )
| 718 |
"""simple docstring"""
lowerCAmelCase_: Union[str, Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
lowerCAmelCase_: List[str] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
lowerCAmelCase_: Dict = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
lowerCAmelCase_: Optional[int] = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
lowerCAmelCase_: Tuple = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
lowerCAmelCase_: str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
lowerCAmelCase_: int = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 668 | 0 |
"""simple docstring"""
import os
def largest_product( grid ):
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lhs_product = 0
    rhs_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lhs_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rhs_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lhs_product , rhs_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    grid = []
    with open(os.path.dirname(__file__ ) + "/grid.txt" ) as file:
        for line in file:
            grid.append(line.strip("\n" ).split(" " ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
| 719 |
"""simple docstring"""
from __future__ import annotations
def __a ( number_of_bytes , partitions ):
'''simple docstring'''
if partitions <= 0:
raise ValueError("partitions must be a positive number!" )
if partitions > number_of_bytes:
raise ValueError("partitions can not > number_of_bytes!" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions ):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'''{start_bytes}-{end_bytes}''' )
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
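    # Illustrative usage (input values assumed for demonstration): splitting
    # 100 bytes across 4 partitions yields ['1-25', '26-50', '51-75', '76-100'].
    print(__a(100, 4))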
| 668 | 0 |
"""simple docstring"""
import functools
def __a ( days , costs ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("The parameter days should be a list of integers" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("The parameter costs should be a list of three integers" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("All days elements should be greater than 0" )
    if max(days ) >= 3_66:
        raise ValueError("All days elements should be less than 366" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
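    # Illustrative usage (inputs assumed, mirroring the classic formulation of
    # the problem): for days [1, 4, 6, 7, 8, 20] and costs [2, 7, 15] the
    # minimum total cost is 11 (a 7-day pass covering days 1-7, plus 1-day
    # passes for days 8 and 20).
    print(__a([1, 4, 6, 7, 8, 20], [2, 7, 15]))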
| 720 |
"""simple docstring"""
from collections import deque
class Process :
    def __init__( self, process_name, arrival_time, burst_time ):
        '''simple docstring'''
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ :
    def __init__( self, number_of_queues, time_slices, queue, current_time, ):
        '''simple docstring'''
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ):
        '''simple docstring'''
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence
    def calculate_waiting_time( self, queue ):
        '''simple docstring'''
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times
    def calculate_turnaround_time( self, queue ):
        '''simple docstring'''
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times
    def calculate_completion_time( self, queue ):
        '''simple docstring'''
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times
    def snake_case__ ( self, queue ):
'''simple docstring'''
return [q.burst_time for q in queue]
    def update_waiting_time( self, process ):
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
    def first_come_first_served( self, ready_queue ):
        '''simple docstring'''
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self, ready_queue, time_slice ):
        '''simple docstring'''
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
'''simple docstring'''
for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowerCAmelCase_: Optional[int] = Process("P1", 0, 5_3)
lowerCAmelCase_: Union[str, Any] = Process("P2", 0, 1_7)
lowerCAmelCase_: str = Process("P3", 0, 6_8)
lowerCAmelCase_: int = Process("P4", 0, 2_4)
lowerCAmelCase_: Dict = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
lowerCAmelCase_: Any = Process("P1", 0, 5_3)
lowerCAmelCase_: Tuple = Process("P2", 0, 1_7)
lowerCAmelCase_: Optional[int] = Process("P3", 0, 6_8)
lowerCAmelCase_: List[Any] = Process("P4", 0, 2_4)
lowerCAmelCase_: Union[str, Any] = 3
lowerCAmelCase_: Any = [1_7, 2_5]
lowerCAmelCase_: Optional[Any] = deque([Pa, Pa, Pa, Pa])
lowerCAmelCase_: Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCAmelCase_: Tuple = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'waiting time:\
    \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pb, Pc, Pd])}'
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'completion time:\
    \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pb, Pc, Pd])}'
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'turnaround time:\
    \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pb, Pc, Pd])}'
)
# print sequence of finished processes
print(
F'sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'
)
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[Any] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_: List[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
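    # Illustrative note (not part of the module): with this lazy pattern the
    # heavy imports run only on first attribute access, e.g.
    #     from transformers.models.nllb import NllbTokenizerFast
    # triggers the tokenizers-backed import at that point (assuming the
    # optional dependency is installed).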
| 721 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase_: Dict = "pt"
elif is_tf_available():
lowerCAmelCase_: Dict = "tf"
else:
lowerCAmelCase_: str = "jax"
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        tokenizer = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self ):
'''simple docstring'''
return ByTaTokenizer.from_pretrained("google/byt5-small" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
    def snake_case__ ( self, tokenizer, with_prefix_space=False, max_length=20, min_length=5 ):
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R"^[ a-zA-Z]+$", t[1] ), toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1], add_special_tokens=False ), toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False )
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False )
        return output_txt, output_ids
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"] )
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, "Unicode €.</s>" )
        encoded = tokenizer("e è é ê ë" )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded, "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ), "e è é ê ë</s>" )
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        self.assertIsInstance(batch, BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens, result )
        self.assertEqual((2, 37), batch.input_ids.shape )
        self.assertEqual((2, 37), batch.attention_mask.shape )
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch )
        self.assertIn("attention_mask", batch )
        self.assertNotIn("decoder_input_ids", batch )
        self.assertNotIn("decoder_attention_mask", batch )
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK )
        self.assertEqual(32, targets["input_ids"].shape[1] )
def snake_case__ ( self ):
'''simple docstring'''
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text )
        self.assertEqual(expected_src_tokens, batch["input_ids"][0] )
        self.assertEqual(expected_tgt_tokens, batch["labels"][0] )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length, 42 )
# Now let's start the test
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
shutil.rmtree(_UpperCAmelCase )
lowercase__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase__ = tempfile.mkdtemp()
lowercase__ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
lowercase__ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
tokenizer.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
lowercase__ = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length, 42 )
lowercase__ = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43 )
self.assertEqual(tokenizer.model_max_length, 43 )
shutil.rmtree(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), encoding="utf-8" ) as json_file:
lowercase__ = json.load(_UpperCAmelCase )
lowercase__ = [F'''<extra_id_{i}>''' for i in range(125 )]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
lowercase__ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(_UpperCAmelCase, "special_tokens_map.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
with open(os.path.join(_UpperCAmelCase, "tokenizer_config.json" ), "w", encoding="utf-8" ) as outfile:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, )
self.assertIn(
"an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase__ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=_UpperCAmelCase )]
lowercase__ = tokenizer_class.from_pretrained(
_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase, )
self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ), )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_class.from_pretrained(_UpperCAmelCase )
self.assertTrue(tokenizer.decode([255] ) == "" )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
lowercase__ = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
lowercase__ = 0
lowercase__ = tokenizer.convert_ids_to_tokens(
_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
for attr in attributes_list:
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, attr + "_id", _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, _UpperCAmelCase ), _UpperCAmelCase )
self.assertEqual(getattr(_UpperCAmelCase, attr + "_id" ), _UpperCAmelCase )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [] )
setattr(_UpperCAmelCase, "additional_special_tokens_ids", [token_id_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens" ), [token_to_test_setters] )
self.assertListEqual(getattr(_UpperCAmelCase, "additional_special_tokens_ids" ), [token_id_to_test_setters] )
| 668 | 0 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCAmelCase_: Union[str, Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
def load_adapter( full_name , value , adapter , unused_weights ):
    '''simple docstring'''
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
                adapter.proj_layer_norm.bias.data = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                adapter.proj_layer_norm.weight.data = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
                adapter.proj.bias.data = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
                adapter.proj.weight.data = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
    elif isinstance(layer_id , int ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
            adapter.layers[layer_id].conv.bias.data = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
            adapter.layers[layer_id].conv.weight.data = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
def __a ( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    '''simple docstring'''
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/" )[:-1] ),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 25_00_04
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase_: Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_0_2_4, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=2_5_0_0_0_4, type=int, help="`decoder_start_token_id` of model config")
lowerCAmelCase_: Optional[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 700 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class a__ ( unittest.TestCase ):
snake_case_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = hf_hub_download(
repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase, image_processor=_UpperCAmelCase, top_k=2 )
lowercase__ = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase, [
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
{"score": ANY(_UpperCAmelCase ), "label": ANY(_UpperCAmelCase )},
], )
@require_torch
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowercase__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10} )
lowercase__ = pipeline(
"video-classification", model=_UpperCAmelCase, feature_extractor=_UpperCAmelCase, frame_sampling_rate=4 )
lowercase__ = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset" )
lowercase__ = video_classifier(_UpperCAmelCase, top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}], )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
], top_k=2, )
self.assertEqual(
nested_simplify(_UpperCAmelCase, decimals=4 ), [
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
[{"score": 0.5_199, "label": "LABEL_0"}, {"score": 0.4_801, "label": "LABEL_1"}],
], )
@require_tf
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
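# Aho-Corasick automaton for multi-keyword string matching: the keywords are
# inserted into a trie (self.adlist), failure links are computed with a BFS,
# and a single left-to-right scan of the text then finds every occurrence of
# every keyword, so search time is proportional to the text length plus the
# number of matches.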
class a__ :
def __init__( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(_UpperCAmelCase )
self.set_fail_transitions()
def snake_case__ ( self, _UpperCAmelCase, _UpperCAmelCase ):
'''simple docstring'''
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = 0
for character in keyword:
lowercase__ = self.find_next_state(_UpperCAmelCase, _UpperCAmelCase )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
lowercase__ = len(self.adlist ) - 1
else:
lowercase__ = next_state
self.adlist[current_state]["output"].append(_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
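        # Classic failure-link construction: a breadth-first traversal sets each
        # node's fail_state to the deepest node whose path is a proper suffix of
        # this node's path, and merges that node's output list into this one's.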
lowercase__ = deque()
for node in self.adlist[0]["next_states"]:
q.append(_UpperCAmelCase )
lowercase__ = 0
while q:
lowercase__ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_UpperCAmelCase )
lowercase__ = self.adlist[r]["fail_state"]
while (
self.find_next_state(_UpperCAmelCase, self.adlist[child]["value"] ) is None
and state != 0
):
lowercase__ = self.adlist[state]["fail_state"]
lowercase__ = self.find_next_state(
_UpperCAmelCase, self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
lowercase__ = 0
lowercase__ = (
self.adlist[child]["output"]
+ self.adlist[self.adlist[child]["fail_state"]]["output"]
)
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
        lowercase__ = {}  # dict mapping each matched keyword to the list of its start indices
lowercase__ = 0
for i in range(len(_UpperCAmelCase ) ):
while (
self.find_next_state(_UpperCAmelCase, string[i] ) is None
and current_state != 0
):
lowercase__ = self.adlist[current_state]["fail_state"]
lowercase__ = self.find_next_state(_UpperCAmelCase, string[i] )
if next_state is None:
lowercase__ = 0
else:
lowercase__ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
lowercase__ = []
result[key].append(i - len(_UpperCAmelCase ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
"""simple docstring"""
import itertools
import math
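# Returns the nth prime (the 10001st by default, as in Project Euler #7).
# The primality test only trial-divides by 2, 3 and numbers of the form
# 6k +/- 1, since every prime greater than 3 has that form.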
def __a ( A ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __a ( ):
'''simple docstring'''
lowercase__ = 2
while True:
if is_prime(A ):
yield num
num += 1
def __a ( A = 1_00_01 ):
'''simple docstring'''
    return next(itertools.islice(prime_generator() , A - 1 , A ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: Union[str, Any] = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class a__ ( _UpperCAmelCase ):
snake_case_ = "ctrl"
snake_case_ = ["past_key_values"]
snake_case_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, _UpperCAmelCase=24_6534, _UpperCAmelCase=256, _UpperCAmelCase=1280, _UpperCAmelCase=8192, _UpperCAmelCase=48, _UpperCAmelCase=16, _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=1E-6, _UpperCAmelCase=0.02, _UpperCAmelCase=True, **_UpperCAmelCase, ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = n_positions
lowercase__ = n_embd
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = dff
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = use_cache
super().__init__(**lowercase__ )
| 702 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
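# Thin reader over the packaged `text` builder: in streaming mode it yields an
# IterableDataset without touching the cache, otherwise it downloads/prepares
# the Arrow files and loads a map-style Dataset for the requested split.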
class a__ ( _a ):
def __init__( self, _UpperCAmelCase, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = None, _UpperCAmelCase = False, _UpperCAmelCase = False, _UpperCAmelCase = None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase, split=_UpperCAmelCase, features=_UpperCAmelCase, cache_dir=_UpperCAmelCase, keep_in_memory=_UpperCAmelCase, streaming=_UpperCAmelCase, num_proc=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = path_or_paths if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else {self.split: path_or_paths}
lowercase__ = Text(
cache_dir=_UpperCAmelCase, data_files=_UpperCAmelCase, features=_UpperCAmelCase, **_UpperCAmelCase, )
def snake_case__ ( self ):
'''simple docstring'''
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase, download_mode=_UpperCAmelCase, verification_mode=_UpperCAmelCase, base_path=_UpperCAmelCase, num_proc=self.num_proc, )
lowercase__ = self.builder.as_dataset(
split=self.split, verification_mode=_UpperCAmelCase, in_memory=self.keep_in_memory )
return dataset
| 668 | 0 |
"""simple docstring"""
from itertools import product
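# Project Euler #205 (Dice Game): Peter rolls nine four-sided dice, Colin rolls
# six six-sided dice. The helper tabulates how often each total occurs; the
# solution counts, for every total Peter can roll, the strictly smaller Colin
# totals, and divides by the 4**9 * 6**6 equally likely outcome pairs.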
def __a ( A , A ):
'''simple docstring'''
lowercase__ = sides_number
lowercase__ = max_face_number * dice_number
lowercase__ = [0] * (max_total + 1)
lowercase__ = 1
lowercase__ = range(SCREAMING_SNAKE_CASE_ , max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE_ , repeat=SCREAMING_SNAKE_CASE_ ):
lowercase__ = sum(SCREAMING_SNAKE_CASE_ )
totals_frequencies[total] += 1
return totals_frequencies
def __a ( ):
'''simple docstring'''
lowercase__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowercase__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowercase__ = 0
lowercase__ = 9
lowercase__ = 4 * 9
lowercase__ = 6
for peter_total in range(SCREAMING_SNAKE_CASE_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowercase__ = (4**9) * (6**6)
lowercase__ = peter_wins_count / total_games_number
lowercase__ = round(SCREAMING_SNAKE_CASE_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'{solution() = }')
| 703 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCAmelCase_: List[str] = 1_6
lowerCAmelCase_: Optional[Any] = 3_2
def __a ( A , A = 16 , A = "bert-base-cased" ):
'''simple docstring'''
lowercase__ = AutoTokenizer.from_pretrained(A )
lowercase__ = load_dataset("glue" , "mrpc" )
def tokenize_function(A ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowercase__ = datasets.map(
A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A )
    # We also rename the 'label' column to 'labels', which is the column name the
    # transformers models expect for the label tensor
lowercase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(A , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowercase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A )
lowercase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
def __a ( A , A ):
'''simple docstring'''
lowercase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ = config["lr"]
lowercase__ = int(config["num_epochs"] )
lowercase__ = int(config["seed"] )
lowercase__ = int(config["batch_size"] )
lowercase__ = args.model_name_or_path
set_seed(A )
lowercase__ , lowercase__ = get_dataloaders(A , A , A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A )
# Instantiate optimizer
lowercase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
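    # When the DeepSpeed config file already specifies an optimizer, Accelerate
    # expects the placeholder DummyOptim here and lets DeepSpeed build the real
    # one; the same convention applies to DummyScheduler below.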
lowercase__ = optimizer_cls(params=model.parameters() , lr=A )
if accelerator.state.deepspeed_plugin is not None:
lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowercase__ = 1
lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps
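    # The optimizer only steps once per `gradient_accumulation_steps` batches,
    # so the scheduler is sized to the number of optimizer updates rather than
    # the number of batches.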
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowercase__ = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=0 , num_training_steps=A , )
else:
lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
A , A , A , A , A )
# We need to keep track of how many total steps we have iterated over
lowercase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowercase__ = 0
# Now we train the model
lowercase__ = evaluate.load("glue" , "mrpc" )
lowercase__ = 0
lowercase__ = {}
for epoch in range(A , A ):
model.train()
for step, batch in enumerate(A ):
lowercase__ = model(**A )
lowercase__ = outputs.loss
lowercase__ = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
lowercase__ = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowercase__ , lowercase__ = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(A ) - 1:
lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=A , references=A , )
lowercase__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , A )
lowercase__ = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
lowercase__ = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(A , A )
def __a ( ):
'''simple docstring'''
    lowercase__ = argparse.ArgumentParser(description="Simple example of a training script that tracks the best evaluation accuracy." )
parser.add_argument(
"--model_name_or_path" , type=A , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A , )
parser.add_argument(
"--output_dir" , type=A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=A , default=A , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=A , default=3 , help="Number of train epochs." , )
lowercase__ = parser.parse_args()
lowercase__ = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 668 | 0 |
"""simple docstring"""
from __future__ import annotations
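# Largest 1-9 pandigital concatenated product. Multiplying a four-digit base n
# (whose double has five digits) by 100002 gives n * 10**5 + 2n, i.e. the
# digits of n followed by the digits of 2n; likewise a three-digit base times
# 1002003 concatenates n, 2n and 3n. Searching from the largest bases downward
# means the first pandigital hit is the answer.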
def __a ( A ):
'''simple docstring'''
lowercase__ = str(__lowercase )
return len(__lowercase ) == 9 and set(__lowercase ) == set("123456789" )
def __a ( ):
'''simple docstring'''
for base_num in range(99_99 , 49_99 , -1 ):
lowercase__ = 10_00_02 * base_num
if is_9_pandigital(__lowercase ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
lowercase__ = 1_00_20_03 * base_num
if is_9_pandigital(__lowercase ):
return candidate
return None
if __name__ == "__main__":
print(F'{solution() = }')
| 704 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( _a ):
snake_case_ = (IPNDMScheduler,)
snake_case_ = (("num_inference_steps", 50),)
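    # IPNDM is a multistep method that reuses a short history of past model
    # residuals, which is why these tests seed the scheduler with four dummy
    # residuals before stepping.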
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 1000}
config.update(**_UpperCAmelCase )
return config
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
new_scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self, _UpperCAmelCase=0, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ = dummy_past_residuals[:]
if time_step is None:
lowercase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCAmelCase )
lowercase__ = scheduler_class.from_pretrained(_UpperCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCAmelCase )
            # copy over dummy past residuals (must be done after setting timesteps)
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = new_scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
lowercase__ = self.scheduler_classes[0]
lowercase__ = self.get_scheduler_config(**_UpperCAmelCase )
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = 10
lowercase__ = self.dummy_model()
lowercase__ = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ = model(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ).prev_sample
return sample
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = dict(self.forward_default_kwargs )
lowercase__ = kwargs.pop("num_inference_steps", _UpperCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase__ = self.get_scheduler_config()
lowercase__ = scheduler_class(**_UpperCAmelCase )
lowercase__ = self.dummy_sample
lowercase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCAmelCase, "set_timesteps" ):
scheduler.set_timesteps(_UpperCAmelCase )
elif num_inference_steps is not None and not hasattr(_UpperCAmelCase, "set_timesteps" ):
lowercase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ = dummy_past_residuals[:]
lowercase__ = scheduler.timesteps[5]
lowercase__ = scheduler.timesteps[6]
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
lowercase__ = scheduler.step(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_UpperCAmelCase, time_step=_UpperCAmelCase )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.full_loop()
lowercase__ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 668 | 0 |
"""simple docstring"""
def __a ( A ):
'''simple docstring'''
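    # Counts how many right shifts it takes to reach zero, i.e. the position of
    # the most significant set bit (equivalent to Python's int.bit_length());
    # 0 maps to position 0.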
if not isinstance(__snake_case , __snake_case ):
        raise TypeError("Input value must be an 'int' type" )
lowercase__ = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
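# MGP-STR's tokenizer is a plain character-level vocab lookup (scene-text
# recognition), so the fixture vocabulary is just the [GO]/[s] specials plus
# digits and lowercase letters.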
@require_tokenizers
class a__ ( _a , unittest.TestCase ):
snake_case_ = MgpstrTokenizer
snake_case_ = False
snake_case_ = {}
snake_case_ = False
def snake_case__ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
lowercase__ = dict(zip(_UpperCAmelCase, range(len(_UpperCAmelCase ) ) ) )
lowercase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + "\n" )
def snake_case__ ( self, **_UpperCAmelCase ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **_UpperCAmelCase )
def snake_case__ ( self, _UpperCAmelCase ):
'''simple docstring'''
lowercase__ = "tester"
lowercase__ = "tester"
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def snake_case__ ( self ):
'''simple docstring'''
pass
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token} )
lowercase__ = tokenizer.encode([special_token], add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ), 1 )
lowercase__ = tokenizer.decode(_UpperCAmelCase, skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def snake_case__ ( self ):
'''simple docstring'''
lowercase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
lowercase__ , lowercase__ = self.get_input_output_texts(_UpperCAmelCase )
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ), 0 )
lowercase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase )
self.assertEqual(text_a.replace(" ", "" ), _UpperCAmelCase )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def snake_case__ ( self ):
'''simple docstring'''
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def snake_case__ ( self ):
'''simple docstring'''
pass
| 668 | 0 |